/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/igmp.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved",
	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
};

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
			cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);
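/*
 * Example (a minimal sketch, not part of this file): a consumer's event
 * handler can use rdma_event_msg() for readable logging. "my_handler" is
 * a hypothetical callback, not one defined here.
 *
 *	static int my_handler(struct rdma_cm_id *id,
 *			      struct rdma_cm_event *event)
 *	{
 *		pr_info("cma event %s, status %d\n",
 *			rdma_event_msg(event->event), event->status);
 *		return 0;
 *	}
 */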
const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
						int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return ibcm_reject_msg(reason);

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return iwcm_reject_msg(reason);

	WARN_ON_ONCE(1);
	return "unrecognized transport";
}
EXPORT_SYMBOL(rdma_reject_msg);

bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return reason == IB_CM_REJ_CONSUMER_DEFINED;

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return reason == -ECONNREFUSED;

	WARN_ON_ONCE(1);
	return false;
}
EXPORT_SYMBOL(rdma_is_consumer_reject);

const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
				      struct rdma_cm_event *ev, u8 *data_len)
{
	const void *p;

	if (rdma_is_consumer_reject(id, ev->status)) {
		*data_len = ev->param.conn.private_data_len;
		p = ev->param.conn.private_data;
	} else {
		*data_len = 0;
		p = NULL;
	}
	return p;
}
EXPORT_SYMBOL(rdma_consumer_reject_data);
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;

struct cma_pernet {
	struct idr tcp_ps;
	struct idr udp_ps;
	struct idr ipoib_ps;
	struct idr ib_ps;
};

static struct cma_pernet *cma_pernet(struct net *net)
{
	return net_generic(net, cma_pernet_id);
}

static struct idr *cma_pernet_idr(struct net *net, enum rdma_port_space ps)
{
	struct cma_pernet *pernet = cma_pernet(net);

	switch (ps) {
	case RDMA_PS_TCP:
		return &pernet->tcp_ps;
	case RDMA_PS_UDP:
		return &pernet->udp_ps;
	case RDMA_PS_IPOIB:
		return &pernet->ipoib_ps;
	case RDMA_PS_IB:
		return &pernet->ib_ps;
	default:
		return NULL;
	}
}
struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
	enum ib_gid_type	*default_gid_type;
	u8			*default_roce_tos;
};

struct rdma_bind_list {
	enum rdma_port_space	ps;
	struct hlist_head	owners;
	unsigned short		port;
};

struct class_port_info_context {
	struct ib_class_port_info	*class_port_info;
	struct ib_device		*device;
	struct completion		done;
	struct ib_sa_query		*sa_query;
	u8				port_num;
};
static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct net *net,
					  enum rdma_port_space ps, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	return idr_find(idr, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_port_space ps, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	idr_remove(idr, snum);
}
void cma_ref_dev(struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter	filter,
					     void		*cookie)
{
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;

	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;
			break;
		}

	if (found_cma_dev)
		cma_ref_dev(found_cma_dev);
	mutex_unlock(&lock);
	return found_cma_dev;
}
int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type)
{
	unsigned long supported_gids;

	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
		default_gid_type;

	return 0;
}

int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
			     u8 default_roce_tos)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
		default_roce_tos;

	return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
	return cma_dev->device;
}
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum rdma_cm_state	state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	struct mutex		handler_mutex;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	pid_t			owner;
	u32			options;
	u8			srq;
	u8			tos;
	bool			tos_set;
	u8			reuseaddr;
	u8			afonly;
	enum ib_gid_type	gid_type;
};
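/*
 * A sketch of the pattern described above (illustration only, not code in
 * this file): every event handler takes handler_mutex and re-checks the
 * state, so rdma_destroy_id() can drain in-flight callbacks simply by
 * acquiring and releasing the same mutex after moving the id to
 * RDMA_CM_DESTROYING:
 *
 *	mutex_lock(&id_priv->handler_mutex);
 *	if (id_priv->state != RDMA_CM_CONNECT)	// e.g. destroy in progress
 *		goto out;
 *	ret = id_priv->id.event_handler(&id_priv->id, &event);
 * out:
 *	mutex_unlock(&id_priv->handler_mutex);
 */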
struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
	struct kref		mcref;
	bool			igmp_joined;
	u8			join_state;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

struct iboe_mcast_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct cma_multicast	*mc;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00

struct cma_req_info {
	struct ib_device *device;
	int port;
	union ib_gid local_gid;
	__be64 service_id;
	u16 pkey;
	bool has_gid:1;
};
static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
				   enum rdma_cm_state exch)
{
	unsigned long flags;
	enum rdma_cm_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
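/*
 * Example (a sketch using the helpers above): transitions are performed
 * only when the id is still in the expected state, so racing paths fail
 * cleanly instead of corrupting the state machine:
 *
 *	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
 *		return -EINVAL;	// state already changed by another thread
 */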
static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
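/*
 * Example of the nibble packing above: starting from ip_version == 0x00,
 * cma_set_ip_ver(hdr, 4) stores the version in bits 7:4, leaving
 * ip_version == 0x40, and cma_get_ip_ver(hdr) then returns 4; an IPv6
 * header likewise carries 0x60.
 */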
static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
	struct in_device *in_dev = NULL;

	if (ndev) {
		rtnl_lock();
		in_dev = __in_dev_get_rtnl(ndev);
		if (in_dev) {
			if (join)
				ip_mc_inc_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
			else
				ip_mc_dec_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
		}
		rtnl_unlock();
	}
	return (in_dev) ? 0 : -ENODEV;
}
static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
{
	cma_ref_dev(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->gid_type = 0;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	_cma_attach_to_dev(id_priv, cma_dev);
	id_priv->gid_type =
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];
}

void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}
static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	mutex_unlock(&lock);
}
static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey) {
		if (qkey && id_priv->qkey != qkey)
			return -EINVAL;
		return 0;
	}

	if (qkey) {
		id_priv->qkey = qkey;
		return 0;
	}

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret = 0;

	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr, NULL);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
	}

	return ret;
}
static inline int cma_validate_port(struct ib_device *device, u8 port,
				    enum ib_gid_type gid_type,
				    union ib_gid *gid, int dev_type,
				    int bound_if_index)
{
	int ret = -ENODEV;
	struct net_device *ndev = NULL;

	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		return ret;

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		return ret;

	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
		ndev = dev_get_by_index(&init_net, bound_if_index);
		if (ndev && ndev->flags & IFF_LOOPBACK) {
			pr_info("detected loopback device\n");
			dev_put(ndev);

			if (!device->get_netdev)
				return -EOPNOTSUPP;

			ndev = device->get_netdev(device, port);
			if (!ndev)
				return -ENODEV;
		}
	} else {
		gid_type = IB_GID_TYPE_IB;
	}

	ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
					 ndev, NULL);

	if (ndev)
		dev_put(ndev);

	return ret;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv,
			   struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid, *gidp;
	int ret = -ENODEV;
	u8 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	mutex_lock(&lock);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);

	if (listen_id_priv) {
		cma_dev = listen_id_priv->cma_dev;
		port = listen_id_priv->id.port_num;
		gidp = rdma_protocol_roce(cma_dev->device, port) ?
		       &iboe_gid : &gid;

		ret = cma_validate_port(cma_dev->device, port,
					rdma_protocol_ib(cma_dev->device, port) ?
					IB_GID_TYPE_IB :
					listen_id_priv->gid_type, gidp,
					dev_addr->dev_type,
					dev_addr->bound_dev_if);
		if (!ret) {
			id_priv->id.port_num = port;
			goto out;
		}
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (listen_id_priv &&
			    listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;

			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;

			ret = cma_validate_port(cma_dev->device, port,
						rdma_protocol_ib(cma_dev->device, port) ?
						IB_GID_TYPE_IB :
						cma_dev->default_gid_type[port - 1],
						gidp, dev_addr->dev_type,
						dev_addr->bound_dev_if);
			if (!ret) {
				id_priv->id.port_num = port;
				goto out;
			}
		}
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	mutex_unlock(&lock);
	return ret;
}
/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	u16 pkey, index;
	u8 p;
	enum ib_port_state port_state;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	list_for_each_entry(cur_dev, &dev_list, list) {
		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;

			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
				continue;
			for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
						       &gid, NULL);
			     i++) {
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
				    dgid->global.subnet_prefix) &&
				    port_state == IB_PORT_ACTIVE) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
				}
			}
		}
	}

	if (!cma_dev)
		return -ENODEV;

found:
	cma_attach_to_dev(id_priv, cma_dev);
	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}
struct rdma_cm_id *rdma_create_id(struct net *net,
				  rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	id_priv->tos_set = false;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = get_net(net);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
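/*
 * Example usage (a minimal sketch, not part of this file; "my_handler" and
 * "my_ctx" are hypothetical consumer names):
 *
 *	struct rdma_cm_id *id;
 *
 *	id = rdma_create_id(&init_net, my_handler, my_ctx,
 *			    RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	...
 *	rdma_destroy_id(id);
 */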
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp_init_attr->port_num = id->port_num;
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
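/*
 * Example usage (a sketch under the usual consumer flow: the id must
 * already be bound to a device, e.g. after address resolution; "pd" and
 * "cq" are assumed to have been allocated on id->device beforehand):
 *
 *	struct ib_qp_init_attr attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.qp_type = IB_QPT_RC,
 *	};
 *	ret = rdma_create_qp(id, pd, &attr);
 */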
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	union ib_gid sgid;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
			   rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index,
			   &sgid, NULL);
	if (ret)
		goto out;

	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
		pkey = 0xffff;
	else
		pkey = ib_addr_get_pkey(dev_addr);

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		qp_attr->port_num = id_priv->id.port_num;
		*qp_attr_mask |= IB_QP_PORT;
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
	case AF_INET6:
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);
	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}

static __be16 cma_port(struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}
static void cma_save_ib_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     struct rdma_cm_id *listen_id,
			     struct sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	if (src_addr) {
		ib = (struct sockaddr_ib *)src_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = sa_path_get_service_id(path);
			ib->sib_scope_id = 0;
		} else {
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		}
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	}
	if (dst_addr) {
		ib = (struct sockaddr_ib *)dst_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);
		}
	}
}
static void cma_save_ip4_info(struct sockaddr_in *src_addr,
			      struct sockaddr_in *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,
		};
	}
}

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
			      struct sockaddr_in6 *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		};
	}
}
static u16 cma_port_from_service_id(__be64 service_id)
{
	return (u16)be64_to_cpu(service_id);
}

static int cma_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    struct ib_cm_event *ib_event,
			    __be64 service_id)
{
	struct cma_hdr *hdr;
	__be16 port;

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	port = htons(cma_port_from_service_id(service_id));

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info((struct sockaddr_in *)src_addr,
				  (struct sockaddr_in *)dst_addr, hdr, port);
		break;
	case 6:
		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
				  (struct sockaddr_in6 *)dst_addr, hdr, port);
		break;
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}
static int cma_save_net_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     struct rdma_cm_id *listen_id,
			     struct ib_cm_event *ib_event,
			     sa_family_t sa_family, __be64 service_id)
{
	if (sa_family == AF_IB) {
		if (ib_event->event == IB_CM_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id,
					 ib_event->param.req_rcvd.primary_path);
		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
		return 0;
	}

	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}
static int cma_save_req_info(const struct ib_cm_event *ib_event,
			     struct cma_req_info *req)
{
	const struct ib_cm_req_event_param *req_param =
		&ib_event->param.req_rcvd;
	const struct ib_cm_sidr_req_event_param *sidr_param =
		&ib_event->param.sidr_req_rcvd;

	switch (ib_event->event) {
	case IB_CM_REQ_RECEIVED:
		req->device	= req_param->listen_id->device;
		req->port	= req_param->port;
		memcpy(&req->local_gid, &req_param->primary_path->sgid,
		       sizeof(req->local_gid));
		req->has_gid	= true;
		req->service_id =
			sa_path_get_service_id(req_param->primary_path);
		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
		if (req->pkey != req_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    req_param->bth_pkey, req->pkey);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		req->device	= sidr_param->listen_id->device;
		req->port	= sidr_param->port;
		req->has_gid	= false;
		req->service_id	= sidr_param->service_id;
		req->pkey	= sidr_param->pkey;
		if (req->pkey != sidr_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    sidr_param->bth_pkey, req->pkey);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static bool validate_ipv4_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in *dst_addr,
				  const struct sockaddr_in *src_addr)
{
	__be32 daddr = dst_addr->sin_addr.s_addr,
	       saddr = src_addr->sin_addr.s_addr;
	struct fib_result res;
	struct flowi4 fl4;
	int err;
	bool ret;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
	    ipv4_is_loopback(saddr))
		return false;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_iif = net_dev->ifindex;
	fl4.daddr = daddr;
	fl4.saddr = saddr;

	rcu_read_lock();
	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
	rcu_read_unlock();

	return ret;
}
static bool validate_ipv6_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in6 *dst_addr,
				  const struct sockaddr_in6 *src_addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
			   IPV6_ADDR_LINKLOCAL;
	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
					 &src_addr->sin6_addr, net_dev->ifindex,
					 strict);
	bool ret;

	if (!rt)
		return false;

	ret = rt->rt6i_idev->dev == net_dev;
	ip6_rt_put(rt);

	return ret;
#else
	return false;
#endif
}
*net_dev
,
1358 const struct sockaddr
*daddr
,
1359 const struct sockaddr
*saddr
)
1361 const struct sockaddr_in
*daddr4
= (const struct sockaddr_in
*)daddr
;
1362 const struct sockaddr_in
*saddr4
= (const struct sockaddr_in
*)saddr
;
1363 const struct sockaddr_in6
*daddr6
= (const struct sockaddr_in6
*)daddr
;
1364 const struct sockaddr_in6
*saddr6
= (const struct sockaddr_in6
*)saddr
;
1366 switch (daddr
->sa_family
) {
1368 return saddr
->sa_family
== AF_INET
&&
1369 validate_ipv4_net_dev(net_dev
, daddr4
, saddr4
);
1372 return saddr
->sa_family
== AF_INET6
&&
1373 validate_ipv6_net_dev(net_dev
, daddr6
, saddr6
);
1380 static struct net_device
*cma_get_net_dev(struct ib_cm_event
*ib_event
,
1381 const struct cma_req_info
*req
)
1383 struct sockaddr_storage listen_addr_storage
, src_addr_storage
;
1384 struct sockaddr
*listen_addr
= (struct sockaddr
*)&listen_addr_storage
,
1385 *src_addr
= (struct sockaddr
*)&src_addr_storage
;
1386 struct net_device
*net_dev
;
1387 const union ib_gid
*gid
= req
->has_gid
? &req
->local_gid
: NULL
;
1390 err
= cma_save_ip_info(listen_addr
, src_addr
, ib_event
,
1393 return ERR_PTR(err
);
1395 net_dev
= ib_get_net_dev_by_params(req
->device
, req
->port
, req
->pkey
,
1398 return ERR_PTR(-ENODEV
);
1400 if (!validate_net_dev(net_dev
, listen_addr
, src_addr
)) {
1402 return ERR_PTR(-EHOSTUNREACH
);
static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
{
	return (be64_to_cpu(service_id) >> 16) & 0xffff;
}
static bool cma_match_private_data(struct rdma_id_private *id_priv,
				   const struct cma_hdr *hdr)
{
	struct sockaddr *addr = cma_src_addr(id_priv);
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	if (cma_any_addr(addr) && !id_priv->afonly)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (cma_get_ip_ver(hdr) != 4)
			return false;
		if (!cma_any_addr(addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
			return false;
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (cma_get_ip_ver(hdr) != 6)
			return false;
		if (!cma_any_addr(addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
			return false;
		break;
	case AF_IB:
		return true;
	default:
		return false;
	}

	return true;
}
static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
	enum rdma_transport_type transport =
		rdma_node_get_transport(device->node_type);

	return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
	struct ib_device *device = id->device;
	const int port_num = id->port_num ?: rdma_start_port(device);

	return cma_protocol_roce_dev_port(device, port_num);
}
static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
			      u8 port_num)
{
	const struct rdma_addr *addr = &id->route.addr;

	if (!net_dev)
		/* This request is an AF_IB request or a RoCE request */
		return (!id->port_num || id->port_num == port_num) &&
		       (addr->src_addr.ss_family == AF_IB ||
			cma_protocol_roce_dev_port(id->device, port_num));

	return !addr->dev_addr.bound_dev_if ||
	       (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
		addr->dev_addr.bound_dev_if == net_dev->ifindex);
}
static struct rdma_id_private *cma_find_listener(
		const struct rdma_bind_list *bind_list,
		const struct ib_cm_id *cm_id,
		const struct ib_cm_event *ib_event,
		const struct cma_req_info *req,
		const struct net_device *net_dev)
{
	struct rdma_id_private *id_priv, *id_priv_dev;

	if (!bind_list)
		return ERR_PTR(-EINVAL);

	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
		if (cma_match_private_data(id_priv, ib_event->private_data)) {
			if (id_priv->id.device == cm_id->device &&
			    cma_match_net_dev(&id_priv->id, net_dev, req->port))
				return id_priv;
			list_for_each_entry(id_priv_dev,
					    &id_priv->listen_list,
					    listen_list) {
				if (id_priv_dev->id.device == cm_id->device &&
				    cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
					return id_priv_dev;
			}
		}
	}

	return ERR_PTR(-EINVAL);
}
static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
						 struct ib_cm_event *ib_event,
						 struct net_device **net_dev)
{
	struct cma_req_info req;
	struct rdma_bind_list *bind_list;
	struct rdma_id_private *id_priv;
	int err;

	err = cma_save_req_info(ib_event, &req);
	if (err)
		return ERR_PTR(err);

	*net_dev = cma_get_net_dev(ib_event, &req);
	if (IS_ERR(*net_dev)) {
		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
			/* Assuming the protocol is AF_IB */
			*net_dev = NULL;
		} else if (cma_protocol_roce_dev_port(req.device, req.port)) {
			/* TODO find the net dev matching the request parameters
			 * through the RoCE GID table */
			*net_dev = NULL;
		} else {
			return ERR_CAST(*net_dev);
		}
	}

	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
				rdma_ps_from_service_id(req.service_id),
				cma_port_from_service_id(req.service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
	if (IS_ERR(id_priv) && *net_dev) {
		dev_put(*net_dev);
		*net_dev = NULL;
	}

	return id_priv;
}
static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		cma_ps_remove(net, bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
				      id_priv->id.port_num)) {
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
		} else {
			if (mc->igmp_joined) {
				struct rdma_dev_addr *dev_addr =
					&id_priv->id.route.addr.dev_addr;
				struct net_device *ndev = NULL;

				if (dev_addr->bound_dev_if)
					ndev = dev_get_by_index(&init_net,
								dev_addr->bound_dev_if);
				if (ndev) {
					cma_igmp_send(ndev,
						      &mc->multicast.ib->rec.mgid,
						      false);
					dev_put(ndev);
				}
			}
			kref_put(&mc->mcref, release_mc);
		}
	}
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	/*
	 * Wait for any active callback to finish.  New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	put_net(id_priv->id.route.addr.dev_addr.net);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}
static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     id_priv->state != RDMA_CM_CONNECT) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     id_priv->state != RDMA_CM_DISCONNECT))
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		if (cma_comp(id_priv, RDMA_CM_CONNECT) &&
		    (id_priv->id.qp_type != IB_QPT_UD))
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else {
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		}
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
										ib_event->param.rej_rcvd.reason));
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event,
					       struct net_device *net_dev)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
	const __be64 service_id = sa_path_get_service_id(path);
	int ret;

	id = rdma_create_id(listen_id->route.addr.dev_addr.net,
			    listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family, service_id))
		goto err;

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (net_dev) {
		ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
		if (ret)
			goto err;
	} else {
		if (!cma_protocol_roce(listen_id) &&
		    cma_any_addr(cma_src_addr(id_priv))) {
			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
			if (ret)
				goto err;
		}
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}
static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event,
					      struct net_device *net_dev)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct net *net = listen_id->route.addr.dev_addr.net;
	int ret;

	id = rdma_create_id(net, listen_id->event_handler, listen_id->context,
			    listen_id->ps, IB_QPT_UD);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family,
			      ib_event->param.sidr_req_rcvd.service_id))
		goto err;

	if (net_dev) {
		ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
		if (ret)
			goto err;
	} else {
		if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv),
						 &id->route.addr.dev_addr);
			if (ret)
				goto err;
		}
	}

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id = NULL;
	struct rdma_cm_event event;
	struct net_device *net_dev;
	int offset, ret;

	listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
	if (IS_ERR(listen_id))
		return PTR_ERR(listen_id);

	if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
		ret = -EINVAL;
		goto net_dev_put;
	}

	mutex_lock(&listen_id->handler_mutex);
	if (listen_id->state != RDMA_CM_LISTEN) {
		ret = -ECONNABORTED;
		goto err1;
	}

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err1;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret)
		goto err2;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err3;
	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
	    (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);
	if (net_dev)
		dev_put(net_dev);
	return 0;

err3:
	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;
err2:
	cma_exch(conn_id, RDMA_CM_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
err1:
	mutex_unlock(&listen_id->handler_mutex);
	if (conn_id)
		rdma_destroy_id(&conn_id->id);

net_dev_put:
	if (net_dev)
		dev_put(net_dev);

	return ret;
}
__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;

	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);
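/*
 * Example of the encoding above for an IP address: with ps == RDMA_PS_TCP
 * (0x0106) and a bound port of 4791, the returned id is
 * cpu_to_be64(0x010612b7) -- the port space in bits 31:16 and the port
 * number in the low 16 bits. For AF_IB the sid is taken verbatim from
 * the sockaddr.
 */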
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	int ret = 0;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		memcpy(cma_src_addr(id_priv), laddr,
		       rdma_addr_size(laddr));
		memcpy(cma_dst_addr(id_priv), raddr,
		       rdma_addr_size(raddr));
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int ret = -ECONNABORTED;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	listen_id = cm_id->context;

	mutex_lock(&listen_id->handler_mutex);
	if (listen_id->state != RDMA_CM_LISTEN)
		goto out;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
				   listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;

	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, RDMA_CM_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		cma_deref_id(conn_id);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);
	cma_deref_id(conn_id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct ib_cm_id	*id;
	__be64 svc_id;

	addr = cma_src_addr(id_priv);
	svc_id = rdma_get_service_id(&id_priv->id, addr);
	id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id);
	if (IS_ERR(id))
		return PTR_ERR(id);
	id_priv->cm_id.ib = id;

	return 0;
}
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct iw_cm_id	*id;

	id = iw_create_cm_id(id_priv->id.device,
			     iw_conn_req_handler,
			     id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id->tos = id_priv->tos;
	id_priv->cm_id.iw = id;

	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	struct net *net = id_priv->id.route.addr.dev_addr.net;
	int ret;

	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
		return;

	id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps,
			    id_priv->id.qp_type);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	_cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
			ret, cma_dev->device->name);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
	id_priv->tos_set = true;
}
EXPORT_SYMBOL(rdma_set_service_type);
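/*
 * Example (a sketch, not part of this file): a consumer that wants
 * low-latency marking sets the type of service before resolving the
 * route, since the tos feeds the path query below:
 *
 *	rdma_set_service_type(id, 0x10);	// IPTOS_LOWDELAY
 *	ret = rdma_resolve_route(id, 2000);
 */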
static void cma_query_handler(int status, struct sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = RDMA_CM_ROUTE_QUERY;
		work->new_state = RDMA_CM_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
		pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
				     status);
	}

	queue_work(cma_wq, &work->work);
}
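/*
 * Build a path record from the resolved addresses and issue an
 * asynchronous SA path query; cma_query_handler completes the work
 * request when the answer (or an error) arrives.
 */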
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);

	if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
		path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path_rec.rec_type = SA_PATH_REC_TYPE_IB;
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	sa_path_set_service_id(&path_rec,
			       rdma_get_service_id(&id_priv->id,
						   cma_dst_addr(id_priv)));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == RDMA_CM_DESTROYING ||
	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
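/*
 * Map an IP ToS value to an Ethernet service level for RoCE: first via
 * the net_device's traffic-class mapping, then, if configured, via the
 * VLAN egress priority mapping.
 */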
static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	int prio;
	struct net_device *dev;

	prio = rt_tos2priority(tos);
	dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
	if (dev->num_tc)
		return netdev_get_prio_tc_map(dev, prio);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	if (is_vlan_dev(ndev))
		return (vlan_dev_get_egress_qos_mask(ndev, prio) &
			VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#endif
	return 0;
}
static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
					   unsigned long supported_gids,
					   enum ib_gid_type default_gid)
{
	if ((network_type == RDMA_NETWORK_IPV4 ||
	     network_type == RDMA_NETWORK_IPV6) &&
	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	return default_gid;
}
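/*
 * RoCE fabrics have no SA to query, so the path record is synthesized
 * locally from the bound net_device and the resolved L2/L3 addressing.
 */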
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev = NULL;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
					rdma_start_port(id_priv->cma_dev->device)];
	u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	if (addr->dev_addr.bound_dev_if) {
		unsigned long supported_gids;

		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
		if (!ndev) {
			ret = -ENODEV;
			goto err2;
		}

		if (ndev->flags & IFF_LOOPBACK) {
			dev_put(ndev);
			if (!id_priv->id.device->get_netdev) {
				ret = -EOPNOTSUPP;
				goto err2;
			}

			ndev = id_priv->id.device->get_netdev(id_priv->id.device,
							      id_priv->id.port_num);
			if (!ndev) {
				ret = -ENODEV;
				goto err2;
			}
		}

		supported_gids = roce_gid_type_mask_support(id_priv->id.device,
							    id_priv->id.port_num);
		gid_type = cma_route_gid_type(addr->dev_addr.network,
					      supported_gids,
					      id_priv->gid_type);
		route->path_rec->rec_type =
			sa_conv_gid_to_pathrec_type(gid_type);
		sa_path_set_ndev(route->path_rec, &init_net);
		sa_path_set_ifindex(route->path_rec, ndev->ifindex);
	}
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &route->path_rec->sgid);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
		    &route->path_rec->dgid);

	/* Use the hint from IP Stack to select GID Type */
	if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
		gid_type = ib_network_to_gid_type(addr->dev_addr.network);
	route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);

	if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
		/* TODO: get the hoplimit from the inet/inet6 device */
		route->path_rec->hop_limit = addr->dev_addr.hoplimit;
	else
		route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
	route->path_rec->traffic_class = tos;
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	if (rdma_cap_ib_sa(id->device, id->port_num))
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
	else if (rdma_protocol_roce(id->device, id->port_num))
		ret = cma_resolve_iboe_route(id_priv);
	else if (rdma_protocol_iwarp(id->device, id->port_num))
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
	else
		ret = -ENOSYS;

	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
static void cma_set_loopback(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		break;
	case AF_INET6:
		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
			      0, 0, 0, htonl(1));
		break;
	default:
		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
			    0, 0, 0, htonl(1));
		break;
	}
}
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	union ib_gid gid;
	enum ib_port_state port_state;
	u16 pkey;
	int ret;
	u8 p;

	cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    !rdma_cap_ib_cm(cur_dev->device, 1))
			continue;

		if (!cma_dev)
			cma_dev = cur_dev;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
			    port_state == IB_PORT_ACTIVE) {
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}

	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}

	p = 1;

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_protocol_ib(cma_dev->device, p)) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
	if (!status && !id_priv->cma_dev) {
		status = cma_acquire_dev(id_priv, NULL);
		if (status)
			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
					     status);
	} else if (status) {
		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
	}

	if (status) {
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_resolve_ib_dev(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		src_addr->sa_family = dst_addr->sa_family;
		if (IS_ENABLED(CONFIG_IPV6) &&
		    dst_addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
			struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
			src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
			if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
				id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
		} else if (dst_addr->sa_family == AF_IB) {
			((struct sockaddr_ib *) src_addr)->sib_pkey =
				((struct sockaddr_ib *) dst_addr)->sib_pkey;
		}
	}
	return rdma_bind_addr(id, src_addr);
}
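/*
 * Resolve the destination address to an RDMA device and source address.
 * A typical caller flow (sketch; event delivery is asynchronous):
 *
 *	id = rdma_create_id(net, handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
 *	rdma_resolve_addr(id, NULL, dst_addr, timeout_ms);
 *	- handler receives RDMA_CM_EVENT_ADDR_RESOLVED
 *	rdma_resolve_route(id, timeout_ms);
 *	- handler receives RDMA_CM_EVENT_ROUTE_RESOLVED
 *	rdma_connect(id, &conn_param);
 */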
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
	if (id_priv->state == RDMA_CM_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret) {
			memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
			return ret;
		}
	}

	if (cma_family(id_priv) != dst_addr->sa_family) {
		memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
		return -EINVAL;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
		memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
		return -EINVAL;
	}

	atomic_inc(&id_priv->refcount);
	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
					      dst_addr, &id->route.addr.dev_addr,
					      timeout_ms, addr_handler, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (reuse || id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);
int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct sockaddr_ib *sib;
	u64 sid, mask;
	__be16 port;

	addr = cma_src_addr(id_priv);
	port = htons(bind_list->port);

	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_port = port;
		break;
	case AF_INET6:
		((struct sockaddr_in6 *) addr)->sin6_port = port;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		sid = be64_to_cpu(sib->sib_sid);
		mask = be64_to_cpu(sib->sib_sid_mask);
		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
		sib->sib_sid_mask = cpu_to_be64(~0ULL);
		break;
	}
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}
static int cma_alloc_port(enum rdma_port_space ps,
			  struct rdma_id_private *id_priv, unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
			   snum);
	if (ret < 0)
		goto err;

	bind_list->ps = ps;
	bind_list->port = (unsigned short)ret;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
static int cma_port_is_unique(struct rdma_bind_list *bind_list,
			      struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *daddr = cma_dst_addr(id_priv);
	struct sockaddr *saddr = cma_src_addr(id_priv);
	__be16 dport = cma_port(daddr);

	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
		struct sockaddr *cur_saddr = cma_src_addr(cur_id);
		__be16 cur_dport = cma_port(cur_daddr);

		if (id_priv == cur_id)
			continue;

		/* different dest port -> unique */
		if (!cma_any_port(cur_daddr) &&
		    (dport != cur_dport))
			continue;

		/* different src address -> unique */
		if (!cma_any_addr(saddr) &&
		    !cma_any_addr(cur_saddr) &&
		    cma_addr_cmp(saddr, cur_saddr))
			continue;

		/* different dst address -> unique */
		if (!cma_any_addr(cur_daddr) &&
		    cma_addr_cmp(daddr, cur_daddr))
			continue;

		return -EADDRNOTAVAIL;
	}
	return 0;
}
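/*
 * Pick an ephemeral port: start from a random rover within the local
 * port range and probe until a port is found that is either unused or
 * shareable per cma_port_is_unique().
 */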
static int cma_alloc_any_port(enum rdma_port_space ps,
			      struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	inet_get_local_port_range(net, &low, &high);
	remaining = (high - low) + 1;
	rover = prandom_u32() % remaining + low;
retry:
	if (last_used_port != rover) {
		struct rdma_bind_list *bind_list;
		int ret;

		bind_list = cma_ps_find(net, ps, (unsigned short)rover);

		if (!bind_list) {
			ret = cma_alloc_port(ps, id_priv, rover);
		} else {
			ret = cma_port_is_unique(bind_list, id_priv);
			if (!ret)
				cma_bind_port(bind_list, id_priv);
		}
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}
/*
 * Check that the requested port is available.  This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port.  In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
		    cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}
static int cma_use_port(enum rdma_port_space ps,
			struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
	if (!bind_list) {
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}
static int cma_bind_listen(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	int ret = 0;

	mutex_lock(&lock);
	if (bind_list->owners.first->next)
		ret = cma_check_port(bind_list, id_priv, 0);
	mutex_unlock(&lock);
	return ret;
}
static enum rdma_port_space cma_select_inet_ps(
		struct rdma_id_private *id_priv)
{
	switch (id_priv->id.ps) {
	case RDMA_PS_TCP:
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
	case RDMA_PS_IB:
		return id_priv->id.ps;
	default:
		return 0;
	}
}
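/*
 * For AF_IB addresses the port space is encoded in the service ID, so it
 * is recovered (and the sid/mask normalized) from the sockaddr_ib rather
 * than taken from the inet port spaces.
 */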
static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps = 0;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = RDMA_PS_IB;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = RDMA_PS_TCP;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = RDMA_PS_UDP;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}
static int cma_get_port(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;

	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	if (!sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		id->route.addr.src_addr.ss_family = AF_INET;
		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
		return -EINVAL;

	if (id_priv->reuseaddr) {
		ret = cma_bind_listen(id_priv);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id->device) {
		if (rdma_cap_ib_cm(id->device, 1)) {
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
		} else if (rdma_cap_iw_cm(id->device, 1)) {
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
		} else {
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;
	struct sockaddr *daddr;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev(id_priv, NULL);
		if (ret)
			goto err1;
	}

	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6) {
			struct net *net = id_priv->id.route.addr.dev_addr.net;

			id_priv->afonly = net->ipv6.sysctl.bindv6only;
		}
#endif
	}
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	daddr = cma_dst_addr(id_priv);
	daddr->sa_family = addr->sa_family;

	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
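/*
 * Format the CMA header that is prepended to the connection private data;
 * it carries the IP version, addresses, and port so the passive side can
 * reconstruct the IP-level addressing.
 */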
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
	struct cma_hdr *cma_hdr;

	cma_hdr = hdr;
	cma_hdr->cma_version = CMA_VERSION;
	if (cma_family(id_priv) == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
	} else if (cma_family(id_priv) == AF_INET6) {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 6);
		cma_hdr->src_addr.ip6 = src6->sin6_addr;
		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
		cma_hdr->port = src6->sin6_port;
	}
	return 0;
}
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
					     event.status);
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
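/*
 * UD "connections" do not use the full CM handshake; a SIDR request
 * resolves the remote QP number and Q_Key, and cma_sidr_rep_handler
 * above completes the exchange.
 */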
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id	*id;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(private_data);
	return ret;
}
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id	*id;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id->tos = id_priv->tos;
	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_connect_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	if (!conn_param)
		return -EINVAL;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv, qkey);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
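/*
 * Accept an incoming connection request.  Depending on the transport and
 * QP type this sends a SIDR reply (UD), an IB CM REP, or an iWARP accept.
 */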
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);

	id_priv->owner = task_pid_nr(current);

	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_accept_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;

out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret = 0;

	id_priv = mc->id_priv;
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_ADDR_BOUND &&
	    id_priv->state != RDMA_CM_ADDR_RESOLVED)
		goto out;

	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	else
		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
				     status);
	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp) {
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 be16_to_cpu(multicast->rec.mlid));
		if (status)
			pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
					     status);
	}
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		struct rdma_dev_addr *dev_addr =
			&id_priv->id.route.addr.dev_addr;
		struct net_device *ndev =
			dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		enum ib_gid_type gid_type =
			id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
			rdma_start_port(id_priv->cma_dev->device)];

		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 ndev, gid_type,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
		if (ndev)
			dev_put(ndev);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
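/*
 * Derive the multicast GID from the socket address: wildcard maps to the
 * zero MGID, SA-assigned IPv6 MGIDs are used as-is, AF_IB supplies the
 * GID directly, and IPv4/IPv6 addresses are mapped via the IPoIB
 * broadcast group conventions.
 */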
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6)) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = mc->join_state;

	if ((rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) &&
	    (!ib_sa_sendonly_fullmem_support(&sa_client,
					     id_priv->id.device,
					     id_priv->id.port_num))) {
		pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
			"RDMA CM: SM doesn't support Send Only Full Member option\n",
			id_priv->id.device->name, id_priv->id.port_num);
		return -EOPNOTSUPP;
	}

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}
static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;
	enum ib_gid_type gid_type;
	bool send_only;

	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			if (!send_only) {
				err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
						    true);
				if (!err)
					mc->igmp_joined = true;
			}
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !mc->multicast.ib->rec.mtu) {
		if (!err)
			err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->igmp_joined = false;
	mc->join_state = join_state;
	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	if (rdma_protocol_roce(id->device, id->port_num)) {
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);
	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
		ret = cma_join_ib_multicast(id_priv, mc);
	else
		ret = -ENOSYS;

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));

			BUG_ON(id_priv->cma_dev->device != id->device);

			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
				ib_sa_free_multicast(mc->multicast.ib);
				kfree(mc);
			} else if (rdma_protocol_roce(id->device, id->port_num)) {
				if (mc->igmp_joined) {
					struct rdma_dev_addr *dev_addr =
						&id->route.addr.dev_addr;
					struct net_device *ndev = NULL;

					if (dev_addr->bound_dev_if)
						ndev = dev_get_by_index(&init_net,
									dev_addr->bound_dev_if);
					if (ndev) {
						cma_igmp_send(ndev,
							      &mc->multicast.ib->rec.mgid,
							      false);
						dev_put(ndev);
					}
					mc->igmp_joined = false;
				}
				kref_put(&mc->mcref, release_mc);
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
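/*
 * Netdev notifier path: when the hardware address of a bound interface
 * changes (e.g. on bonding failover), queue an RDMA_CM_EVENT_ADDR_CHANGE
 * for every ID using that interface.
 */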
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned int i;
	unsigned long supported_gids = 0;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type)
		goto free_cma_dev;

	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_roce_tos),
					    GFP_KERNEL);
	if (!cma_dev->default_roce_tos)
		goto free_gid_type;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		supported_gids = roce_gid_type_mask_support(device, i);
		WARN_ON(!supported_gids);
		cma_dev->default_gid_type[i - rdma_start_port(device)] =
			find_first_bit(&supported_gids, BITS_PER_LONG);
		cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
	}

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);

	return;

free_gid_type:
	kfree(cma_dev->default_gid_type);

free_cma_dev:
	kfree(cma_dev);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev->default_roce_tos);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS,
						NLM_F_MULTI);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid = id_priv->owner;
			id_stats->port_space = id->ps;
			id_stats->cm_state = id_priv->state;
			id_stats->qp_num = id_priv->qp_num;
			id_stats->qp_type = id->qp_type;

			i_id++;
			nlmsg_end(skb, nlh);
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};
static int cma_init_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_init(&pernet->tcp_ps);
	idr_init(&pernet->udp_ps);
	idr_init(&pernet->ipoib_ps);
	idr_init(&pernet->ib_ps);

	return 0;
}

static void cma_exit_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_destroy(&pernet->tcp_ps);
	idr_destroy(&pernet->udp_ps);
	idr_destroy(&pernet->ipoib_ps);
	idr_destroy(&pernet->ib_ps);
}

static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};
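/*
 * Module init: create the ordered workqueue, register pernet data, the
 * address/SA clients and the netdev notifier, then register as an IB
 * client so cma_add_one() runs for each existing device.
 */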
static int __init cma_init(void)
{
	int ret;

	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, ARRAY_SIZE(cma_cb_table),
			    cma_cb_table))
		pr_warn("RDMA CMA: failed to add netlink callback\n");
	cma_configfs_init();

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}

module_init(cma_init);
module_exit(cma_cleanup);