/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
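
/*
 * CMA_CM_RESPONSE_TIMEOUT is an IB CM timeout exponent: 4.096us * 2^20 is
 * roughly 4.3 seconds per CM exchange, and cma_resolve_ib_udp() below derives
 * its millisecond SIDR timeout from it as 1 << (CMA_CM_RESPONSE_TIMEOUT - 8).
 * CMA_CM_MRA_SETTING combines a service timeout of 24 with
 * IB_CM_MRA_FLAG_DELAY, which asks the IB CM to send the MRA only when a
 * duplicate REQ is seen.
 */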
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static unsigned int next_port;

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

enum cma_state {
	CMA_IDLE,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_ADDR_BOUND,
	CMA_LISTEN,
	CMA_DEVICE_REMOVAL,
	CMA_DESTROYING
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum cma_state		state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;

	struct mutex		handler_mutex;

	int			backlog;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
	u8			tos;
};

struct cma_multicast {
	struct rdma_id_private	*id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 sdp_version;	/* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__be16 port;
	__be16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static int cma_set_qkey(struct rdma_id_private *id_priv)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	rdma_addr_get_sgid(dev_addr, &gid);
	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum cma_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
	return (id_priv->id.device && id_priv->cm_id.ib);
}
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
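
/*
 * A typical active-side caller pairs the exported calls in this file in the
 * order rdma_create_id(), rdma_resolve_addr(), rdma_resolve_route(),
 * rdma_create_qp(), rdma_connect(); a passive-side caller instead uses
 * rdma_bind_addr() followed by rdma_listen() and handles
 * RDMA_CM_EVENT_CONNECT_REQUEST events from its handler.  Every transition
 * is tracked in id_priv->state under id_priv->lock.
 */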
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ipv4_is_zeronet(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ipv4_is_loopback(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __be16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __be16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}
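
/*
 * The helpers above describe the private data header carried in CM exchanges:
 * the active side formats its source/destination addresses, port and IP
 * version into the REQ private data (see cma_format_hdr() further down), and
 * the passive side parses them back out with cma_get_net_info() and
 * cma_save_net_info().  cma_user_data_offset() reports how many bytes of the
 * private data that header consumes - none for SDP, which defines its own
 * header layout.
 */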
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
				&& !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		ib_sa_free_multicast(mc->multicast.ib);
		kfree(mc);
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		return NULL;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
	} else {
		ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
					&rt->addr.dev_addr);
		if (ret)
			goto err;
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
		ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
					&id->route.addr.dev_addr);
		if (ret)
			goto err;
	}

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret) {
		/*
		 * Acquire mutex to prevent user executing rdma_destroy_id()
		 * while we're accessing the cm_id.
		 */
		mutex_lock(&lock);
		if (cma_comp(conn_id, CMA_CONNECT) &&
		    !cma_is_ud_ps(conn_id->id.ps))
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		mutex_unlock(&lock);
		mutex_unlock(&conn_id->handler_mutex);
		goto out;
	}

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
	rdma_destroy_id(&conn_id->id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
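
/*
 * cma_req_handler() runs on the listening id: it builds a new conn_id for the
 * incoming REQ or SIDR_REQ, binds it to the ib_cm id and hands the
 * RDMA_CM_EVENT_CONNECT_REQUEST event to the user.  When the user keeps the
 * id (handler returns 0) an MRA is sent so the remote CM does not time out
 * and retry while the application decides whether to accept.
 */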
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64) ps << 16) + be16_to_cpu(cma_port(addr)));
}
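
/*
 * The IB service ID encodes both the RDMA port space and the port number
 * (ps << 16 | port), so listeners in different port spaces on the same port
 * number map to distinct CM service IDs.
 */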
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = htonl(~0);
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
	event.param.conn.responder_resources = attr.max_qp_rd_atom;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);

out:
	if (dev)
		dev_put(dev);
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);
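
/*
 * Listening on a wildcard address is implemented by cloning the listen onto
 * every known device: cma_listen_on_all() links the id into listen_any_list
 * and creates one internal per-device id (dev_id_priv) via
 * cma_listen_on_dev(), each forwarding events back through
 * cma_listen_handler() to the user's original handler and context.
 */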
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
						 (struct sockaddr *) &addr->dst_addr);

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.ss_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == CMA_DESTROYING ||
	    id_priv->state == CMA_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}

int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}

static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr *src, *dst;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	if (cma_zero_addr(src)) {
		dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
		if ((src->sa_family = dst->sa_family) == AF_INET) {
			((struct sockaddr_in *) src)->sin_addr.s_addr =
				((struct sockaddr_in *) dst)->sin_addr.s_addr;
		} else {
			ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
				       &((struct sockaddr_in6 *) dst)->sin6_addr);
		}
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) {
			((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
				((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
		}
	}
	return rdma_bind_addr(id, src_addr);
}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	int port, ret, low, high;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:
	/* FIXME: add proper port randomization per like inet_csk_get_port */
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	inet_get_local_port_range(&low, &high);
	if (port > high) {
		if (next_port != low) {
			idr_remove(ps, port);
			next_port = low;
			goto retry;
		}
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	if (port == high)
		next_port = low;
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;
	if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
	    !sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}

int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
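
/*
 * Each port space (sdp_ps, tcp_ps, udp_ps, ipoib_ps) has its own idr keyed by
 * port number.  cma_use_port() applies socket-like rules: binding below
 * PROT_SOCK requires CAP_NET_BIND_SERVICE, and a wildcard bind cannot share a
 * port that already has address-specific owners (and vice versa).
 */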
2148 static int cma_format_hdr(void *hdr
, enum rdma_port_space ps
,
2149 struct rdma_route
*route
)
2151 struct cma_hdr
*cma_hdr
;
2152 struct sdp_hh
*sdp_hdr
;
2154 if (route
->addr
.src_addr
.ss_family
== AF_INET
) {
2155 struct sockaddr_in
*src4
, *dst4
;
2157 src4
= (struct sockaddr_in
*) &route
->addr
.src_addr
;
2158 dst4
= (struct sockaddr_in
*) &route
->addr
.dst_addr
;
2163 if (sdp_get_majv(sdp_hdr
->sdp_version
) != SDP_MAJ_VERSION
)
2165 sdp_set_ip_ver(sdp_hdr
, 4);
2166 sdp_hdr
->src_addr
.ip4
.addr
= src4
->sin_addr
.s_addr
;
2167 sdp_hdr
->dst_addr
.ip4
.addr
= dst4
->sin_addr
.s_addr
;
2168 sdp_hdr
->port
= src4
->sin_port
;
2172 cma_hdr
->cma_version
= CMA_VERSION
;
2173 cma_set_ip_ver(cma_hdr
, 4);
2174 cma_hdr
->src_addr
.ip4
.addr
= src4
->sin_addr
.s_addr
;
2175 cma_hdr
->dst_addr
.ip4
.addr
= dst4
->sin_addr
.s_addr
;
2176 cma_hdr
->port
= src4
->sin_port
;
2180 struct sockaddr_in6
*src6
, *dst6
;
2182 src6
= (struct sockaddr_in6
*) &route
->addr
.src_addr
;
2183 dst6
= (struct sockaddr_in6
*) &route
->addr
.dst_addr
;
2188 if (sdp_get_majv(sdp_hdr
->sdp_version
) != SDP_MAJ_VERSION
)
2190 sdp_set_ip_ver(sdp_hdr
, 6);
2191 sdp_hdr
->src_addr
.ip6
= src6
->sin6_addr
;
2192 sdp_hdr
->dst_addr
.ip6
= dst6
->sin6_addr
;
2193 sdp_hdr
->port
= src6
->sin6_port
;
2197 cma_hdr
->cma_version
= CMA_VERSION
;
2198 cma_set_ip_ver(cma_hdr
, 6);
2199 cma_hdr
->src_addr
.ip6
= src6
->sin6_addr
;
2200 cma_hdr
->dst_addr
.ip6
= dst6
->sin6_addr
;
2201 cma_hdr
->port
= src6
->sin6_port
;
2208 static int cma_sidr_rep_handler(struct ib_cm_id
*cm_id
,
2209 struct ib_cm_event
*ib_event
)
2211 struct rdma_id_private
*id_priv
= cm_id
->context
;
2212 struct rdma_cm_event event
;
2213 struct ib_cm_sidr_rep_event_param
*rep
= &ib_event
->param
.sidr_rep_rcvd
;
2216 if (cma_disable_callback(id_priv
, CMA_CONNECT
))
2219 memset(&event
, 0, sizeof event
);
2220 switch (ib_event
->event
) {
2221 case IB_CM_SIDR_REQ_ERROR
:
2222 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2223 event
.status
= -ETIMEDOUT
;
2225 case IB_CM_SIDR_REP_RECEIVED
:
2226 event
.param
.ud
.private_data
= ib_event
->private_data
;
2227 event
.param
.ud
.private_data_len
= IB_CM_SIDR_REP_PRIVATE_DATA_SIZE
;
2228 if (rep
->status
!= IB_SIDR_SUCCESS
) {
2229 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2230 event
.status
= ib_event
->param
.sidr_rep_rcvd
.status
;
2233 ret
= cma_set_qkey(id_priv
);
2235 event
.event
= RDMA_CM_EVENT_ADDR_ERROR
;
2236 event
.status
= -EINVAL
;
2239 if (id_priv
->qkey
!= rep
->qkey
) {
2240 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2241 event
.status
= -EINVAL
;
2244 ib_init_ah_from_path(id_priv
->id
.device
, id_priv
->id
.port_num
,
2245 id_priv
->id
.route
.path_rec
,
2246 &event
.param
.ud
.ah_attr
);
2247 event
.param
.ud
.qp_num
= rep
->qpn
;
2248 event
.param
.ud
.qkey
= rep
->qkey
;
2249 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
2253 printk(KERN_ERR
"RDMA CMA: unexpected IB CM event: %d\n",
2258 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
2260 /* Destroy the CM ID by returning a non-zero value. */
2261 id_priv
->cm_id
.ib
= NULL
;
2262 cma_exch(id_priv
, CMA_DESTROYING
);
2263 mutex_unlock(&id_priv
->handler_mutex
);
2264 rdma_destroy_id(&id_priv
->id
);
2268 mutex_unlock(&id_priv
->handler_mutex
);
2272 static int cma_resolve_ib_udp(struct rdma_id_private
*id_priv
,
2273 struct rdma_conn_param
*conn_param
)
2275 struct ib_cm_sidr_req_param req
;
2276 struct rdma_route
*route
;
2279 req
.private_data_len
= sizeof(struct cma_hdr
) +
2280 conn_param
->private_data_len
;
2281 req
.private_data
= kzalloc(req
.private_data_len
, GFP_ATOMIC
);
2282 if (!req
.private_data
)
2285 if (conn_param
->private_data
&& conn_param
->private_data_len
)
2286 memcpy((void *) req
.private_data
+ sizeof(struct cma_hdr
),
2287 conn_param
->private_data
, conn_param
->private_data_len
);
2289 route
= &id_priv
->id
.route
;
2290 ret
= cma_format_hdr((void *) req
.private_data
, id_priv
->id
.ps
, route
);
2294 id_priv
->cm_id
.ib
= ib_create_cm_id(id_priv
->id
.device
,
2295 cma_sidr_rep_handler
, id_priv
);
2296 if (IS_ERR(id_priv
->cm_id
.ib
)) {
2297 ret
= PTR_ERR(id_priv
->cm_id
.ib
);
2301 req
.path
= route
->path_rec
;
2302 req
.service_id
= cma_get_service_id(id_priv
->id
.ps
,
2303 (struct sockaddr
*) &route
->addr
.dst_addr
);
2304 req
.timeout_ms
= 1 << (CMA_CM_RESPONSE_TIMEOUT
- 8);
2305 req
.max_cm_retries
= CMA_MAX_CM_RETRIES
;
2307 ret
= ib_send_cm_sidr_req(id_priv
->cm_id
.ib
, &req
);
2309 ib_destroy_cm_id(id_priv
->cm_id
.ib
);
2310 id_priv
->cm_id
.ib
= NULL
;
2313 kfree(req
.private_data
);
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
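/*
 * In cma_connect_iw() above, the generic rdma_conn_param values map onto
 * the iWARP notions of ORD/IRD: iw_param.ord (outbound RDMA reads this
 * side may keep outstanding) comes from initiator_depth, and iw_param.ird
 * (inbound RDMA reads this side will service) comes from
 * responder_resources.
 */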
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
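/*
 * Illustrative usage sketch (not part of this file; variable names are
 * hypothetical): once a ULP has seen RDMA_CM_EVENT_ROUTE_RESOLVED and
 * created a QP on the id, it fills a struct rdma_conn_param and calls
 * rdma_connect().  The outcome arrives later through the id's event
 * handler as RDMA_CM_EVENT_ESTABLISHED, a rejection, or an error event.
 *
 *	struct rdma_conn_param param;
 *
 *	memset(&param, 0, sizeof param);
 *	param.responder_resources = 1;
 *	param.initiator_depth = 1;
 *	param.retry_count = 7;
 *	param.rnr_retry_count = 7;
 *	ret = rdma_connect(id, &param);
 */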
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
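/*
 * Illustrative sketch (not part of this file; names are hypothetical): a
 * listener handles RDMA_CM_EVENT_CONNECT_REQUEST on the newly created
 * child id, typically by allocating a QP for it (e.g. with
 * rdma_create_qp()) and then calling rdma_accept(); returning a non-zero
 * value from the handler instead causes the rdma_cm to destroy the new id.
 *
 *	static int example_listen_handler(struct rdma_cm_id *id,
 *					  struct rdma_cm_event *event)
 *	{
 *		struct rdma_conn_param param;
 *
 *		if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST)
 *			return 0;
 *
 *		memset(&param, 0, sizeof param);
 *		param.responder_resources = 1;
 *		param.initiator_depth = 1;
 *		return rdma_accept(id, &param);
 *	}
 */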
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
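/*
 * Illustrative note: a listener that wants to decline a connect request
 * calls rdma_reject() on the new id instead of rdma_accept(); any private
 * data supplied here is carried back to the initiator in the reject
 * message, e.g. (with a hypothetical reason buffer):
 *
 *	ret = rdma_reject(id, &reason, sizeof reason);
 */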
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
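/*
 * Note: for IB, rdma_disconnect() above moves the QP to the error state and
 * then either initiates the disconnect (DREQ) or, if a DREQ was already
 * received, completes it (DREP); each side's event handler typically sees
 * RDMA_CM_EVENT_DISCONNECTED as a result.
 */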
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
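/*
 * In cma_ib_mc_handler() above, event.param.ud.qp_num is reported as
 * 0xFFFFFF, the IB multicast QPN: UD sends addressed to the group use this
 * QPN together with the Q_Key taken from the MCMember record.
 */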
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6)) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
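/*
 * Mapping summary for cma_set_mgid() above: a wildcard address yields a
 * zero MGID (letting the SA assign one at join time), an IPv6 address that
 * already looks like an SA-assigned MGID is used verbatim, and other
 * IPv4/IPv6 multicast addresses are mapped through ip_ib_mc_map() /
 * ipv6_ib_mc_map() over the port's broadcast GID, with the signature byte
 * overwritten for RDMA_PS_UDP so rdma_cm groups stay distinct from plain
 * IPoIB groups.
 */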
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
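/*
 * Illustrative usage (not part of this file; mcast_addr and my_ctx are
 * hypothetical): a UD ULP joins after binding or resolving an address, and
 * receives the result as RDMA_CM_EVENT_MULTICAST_JOIN (carrying the
 * ah_attr, qp_num and qkey needed to send to the group) or
 * RDMA_CM_EVENT_MULTICAST_ERROR:
 *
 *	ret = rdma_join_multicast(id, (struct sockaddr *) &mcast_addr,
 *				  my_ctx);
 */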
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ctx)
{
	struct net_device *ndev = (struct net_device *)ctx;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}
static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
static int __init cma_init(void)
{
	int ret, low, high, remaining;

	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	remaining = (high - low) + 1;
	next_port = ((unsigned int) next_port % remaining) + low;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
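/*
 * Note on cma_init() above: next_port is seeded with random bytes and
 * folded into the range reported by inet_get_local_port_range(), so
 * dynamically assigned rdma_cm port numbers start from a random offset
 * within the same local ephemeral port range the IP stack uses.
 */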
static void __exit cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}
module_init(cma_init);
module_exit(cma_cleanup);