// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 */
#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/igmp.h>
#include <linux/xarray.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/netevent.h>

#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "cma_trace.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 16
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved",
	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
};
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type);

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
			cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);
const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
						int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return ibcm_reject_msg(reason);

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return iwcm_reject_msg(reason);

	WARN_ON_ONCE(1);
	return "unrecognized transport";
}
EXPORT_SYMBOL(rdma_reject_msg);
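
/*
 * Usage sketch (illustrative only, not built with the driver): a ULP
 * event handler would typically combine rdma_event_msg() and
 * rdma_reject_msg() when logging CM activity. The handler name and the
 * CMA_DOC_EXAMPLE guard are hypothetical and never defined.
 */
#ifdef CMA_DOC_EXAMPLE
static int example_log_handler(struct rdma_cm_id *id,
			       struct rdma_cm_event *event)
{
	if (event->event == RDMA_CM_EVENT_REJECTED)
		pr_info("cm_id %p rejected: %s\n", id,
			rdma_reject_msg(id, event->status));
	else
		pr_info("cm_id %p event: %s (status %d)\n", id,
			rdma_event_msg(event->event), event->status);
	return 0;
}
#endif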
/**
 * rdma_is_consumer_reject - return true if the consumer rejected the connect
 *                           request.
 * @id: Communication identifier that received the REJECT event.
 * @reason: Value returned in the REJECT event status field.
 */
static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return reason == IB_CM_REJ_CONSUMER_DEFINED;

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return reason == -ECONNREFUSED;

	WARN_ON_ONCE(1);
	return false;
}
const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
				      struct rdma_cm_event *ev, u8 *data_len)
{
	const void *p;

	if (rdma_is_consumer_reject(id, ev->status)) {
		*data_len = ev->param.conn.private_data_len;
		p = ev->param.conn.private_data;
	} else {
		*data_len = 0;
		p = NULL;
	}
	return p;
}
EXPORT_SYMBOL(rdma_consumer_reject_data);
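
/*
 * Usage sketch (hypothetical, guarded out): on RDMA_CM_EVENT_REJECTED a
 * consumer can recover peer-supplied reject data, if any, with
 * rdma_consumer_reject_data(). A NULL return means the reject came from
 * the CM itself rather than the remote consumer.
 */
#ifdef CMA_DOC_EXAMPLE
static void example_dump_reject(struct rdma_cm_id *id,
				struct rdma_cm_event *ev)
{
	const void *data;
	u8 len = 0;

	data = rdma_consumer_reject_data(id, ev, &len);
	if (data)
		print_hex_dump_bytes("reject data: ", DUMP_PREFIX_OFFSET,
				     data, len);
}
#endif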
/**
 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
 * @id: Communication Identifier
 */
struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device->node_type == RDMA_NODE_RNIC)
		return id_priv->cm_id.iw;
	return NULL;
}
EXPORT_SYMBOL(rdma_iw_cm_id);
/**
 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
 * @res: rdma resource tracking entry pointer
 */
struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
{
	struct rdma_id_private *id_priv =
		container_of(res, struct rdma_id_private, res);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_res_to_id);
static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct rb_root id_table = RB_ROOT;
/* Serialize operations of id_table tree */
static DEFINE_SPINLOCK(id_table_lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;
struct cma_pernet {
	struct xarray tcp_ps;
	struct xarray udp_ps;
	struct xarray ipoib_ps;
	struct xarray ib_ps;
};

static struct cma_pernet *cma_pernet(struct net *net)
{
	return net_generic(net, cma_pernet_id);
}
static
struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
{
	struct cma_pernet *pernet = cma_pernet(net);

	switch (ps) {
	case RDMA_PS_TCP:
		return &pernet->tcp_ps;
	case RDMA_PS_UDP:
		return &pernet->udp_ps;
	case RDMA_PS_IPOIB:
		return &pernet->ipoib_ps;
	case RDMA_PS_IB:
		return &pernet->ib_ps;
	default:
		return NULL;
	}
}
struct id_table_entry {
	struct list_head id_list;
	struct rb_node rb_node;
};

struct cma_device {
	struct list_head list;
	struct ib_device *device;
	struct completion comp;
	refcount_t refcount;
	struct list_head id_list;
	enum ib_gid_type *default_gid_type;
	u8 *default_roce_tos;
};

struct rdma_bind_list {
	enum rdma_ucm_port_space ps;
	struct hlist_head owners;
	unsigned short port;
};
static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	return xa_insert(xa, snum, bind_list, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct net *net,
					  enum rdma_ucm_port_space ps, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	return xa_load(xa, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
			  int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	xa_erase(xa, snum);
}
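
/*
 * Usage sketch (hypothetical, guarded out): the three helpers above give
 * each network namespace one xarray per port space, keyed by port
 * number. Claiming a port is then a single xa_insert() that fails with
 * -EBUSY when the port is already taken.
 */
#ifdef CMA_DOC_EXAMPLE
static int example_claim_port(struct net *net, struct rdma_bind_list *bl,
			      int snum)
{
	int ret = cma_ps_alloc(net, RDMA_PS_TCP, bl, snum);

	if (ret == -EBUSY)
		pr_debug("port %d already owned by %p\n", snum,
			 cma_ps_find(net, RDMA_PS_TCP, snum));
	return ret;
}
#endif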
void cma_dev_get(struct cma_device *cma_dev)
{
	refcount_inc(&cma_dev->refcount);
}

void cma_dev_put(struct cma_device *cma_dev)
{
	if (refcount_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie)
{
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;

	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;
			break;
		}

	if (found_cma_dev)
		cma_dev_get(found_cma_dev);
	mutex_unlock(&lock);
	return found_cma_dev;
}
int cma_get_default_gid_type(struct cma_device *cma_dev,
			     u32 port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     u32 port,
			     enum ib_gid_type default_gid_type)
{
	unsigned long supported_gids;

	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	if (default_gid_type == IB_GID_TYPE_IB &&
	    rdma_protocol_roce_eth_encap(cma_dev->device, port))
		default_gid_type = IB_GID_TYPE_ROCE;

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
		default_gid_type;

	return 0;
}
int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
			     u8 default_roce_tos)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
		default_roce_tos;

	return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
	return cma_dev->device;
}
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *sa_mc;
		struct {
			struct work_struct work;
			struct rdma_cm_event event;
		} iboe_join;
	};
	struct list_head list;
	void *context;
	struct sockaddr_storage addr;
	u8 join_state;
};

struct cma_work {
	struct work_struct work;
	struct rdma_id_private *id;
	enum rdma_cm_state old_state;
	enum rdma_cm_state new_state;
	struct rdma_cm_event event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};
#define CMA_VERSION 0x00

struct cma_req_info {
	struct sockaddr_storage listen_addr_storage;
	struct sockaddr_storage src_addr_storage;
	struct ib_device *device;
	union ib_gid local_gid;
	__be64 service_id;
	int port;
	bool has_gid;
	u16 pkey;
};
static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	/*
	 * The FSM uses a funny double locking where state is protected by both
	 * the handler_mutex and the spinlock. State is not allowed to change
	 * to/from a handler_mutex protected value without also holding
	 * handler_mutex.
	 */
	if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
		lockdep_assert_held(&id_priv->handler_mutex);

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
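
/*
 * Usage sketch (hypothetical, guarded out): state transitions are
 * expressed as compare-and-exchange so racing handlers cannot both move
 * the same id. A caller checks the return value to learn whether it won
 * the transition.
 */
#ifdef CMA_DOC_EXAMPLE
static void example_try_connect(struct rdma_id_private *id_priv)
{
	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		pr_debug("lost the race, id is no longer ROUTE_RESOLVED\n");
	mutex_unlock(&id_priv->handler_mutex);
}
#endif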
static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
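
/*
 * Worked example (comment only): ip_version packs the IP version into
 * bits 7:4 and leaves bits 3:0 untouched. Starting from ip_version ==
 * 0x0F, cma_set_ip_ver(hdr, 4) yields (4 << 4) | (0x0F & 0xF) == 0x4F,
 * and cma_get_ip_ver() then returns 0x4F >> 4 == 4.
 */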
static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
}
static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
	struct in_device *in_dev = NULL;

	if (ndev) {
		rtnl_lock();
		in_dev = __in_dev_get_rtnl(ndev);
		if (in_dev) {
			if (join)
				ip_mc_inc_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
			else
				ip_mc_dec_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
		}
		rtnl_unlock();
	}
	return (in_dev) ? 0 : -ENODEV;
}
static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
				 struct id_table_entry *entry_b)
{
	struct rdma_id_private *id_priv = list_first_entry(
		&entry_b->id_list, struct rdma_id_private, id_list_entry);
	int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
	struct sockaddr *sb = cma_dst_addr(id_priv);

	if (ifindex_a != ifindex_b)
		return (ifindex_a > ifindex_b) ? 1 : -1;

	if (sa->sa_family != sb->sa_family)
		return sa->sa_family - sb->sa_family;

	if (sa->sa_family == AF_INET &&
	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
		return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
			      &((struct sockaddr_in *)sb)->sin_addr,
			      sizeof(((struct sockaddr_in *)sa)->sin_addr));
	}

	if (sa->sa_family == AF_INET6 &&
	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
		return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
				     &((struct sockaddr_in6 *)sb)->sin6_addr);
	}

	return -1;
}
static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
{
	struct rb_node **new, *parent = NULL;
	struct id_table_entry *this, *node;
	unsigned long flags;
	int result;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	spin_lock_irqsave(&id_table_lock, flags);
	new = &id_table.rb_node;
	while (*new) {
		this = container_of(*new, struct id_table_entry, rb_node);
		result = compare_netdev_and_ip(
			node_id_priv->id.route.addr.dev_addr.bound_dev_if,
			cma_dst_addr(node_id_priv), this);

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else {
			list_add_tail(&node_id_priv->id_list_entry,
				      &this->id_list);
			kfree(node);
			goto unlock;
		}
	}

	INIT_LIST_HEAD(&node->id_list);
	list_add_tail(&node_id_priv->id_list_entry, &node->id_list);

	rb_link_node(&node->rb_node, parent, new);
	rb_insert_color(&node->rb_node, &id_table);

unlock:
	spin_unlock_irqrestore(&id_table_lock, flags);
	return 0;
}
static struct id_table_entry *
node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
{
	struct rb_node *node = root->rb_node;
	struct id_table_entry *data;
	int result;

	while (node) {
		data = container_of(node, struct id_table_entry, rb_node);
		result = compare_netdev_and_ip(ifindex, sa, data);
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return data;
	}

	return NULL;
}
static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
{
	struct id_table_entry *data;
	unsigned long flags;

	spin_lock_irqsave(&id_table_lock, flags);
	if (list_empty(&id_priv->id_list_entry))
		goto out;

	data = node_from_ndev_ip(&id_table,
				 id_priv->id.route.addr.dev_addr.bound_dev_if,
				 cma_dst_addr(id_priv));
	if (!data)
		goto out;

	list_del_init(&id_priv->id_list_entry);
	if (list_empty(&data->id_list)) {
		rb_erase(&data->rb_node, &id_table);
		kfree(data);
	}
out:
	spin_unlock_irqrestore(&id_table_lock, flags);
}
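
/*
 * Usage sketch (hypothetical, guarded out): the rb-tree keys entries by
 * (bound ifindex, destination address) through compare_netdev_and_ip(),
 * and each entry carries a list of ids sharing that key. Lookups must
 * hold id_table_lock, mirroring cma_remove_id_from_tree() above.
 */
#ifdef CMA_DOC_EXAMPLE
static bool example_dst_is_tracked(int ifindex, struct sockaddr *dst)
{
	unsigned long flags;
	bool found;

	spin_lock_irqsave(&id_table_lock, flags);
	found = node_from_ndev_ip(&id_table, ifindex, dst) != NULL;
	spin_unlock_irqrestore(&id_table_lock, flags);
	return found;
}
#endif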
static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
{
	cma_dev_get(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->device_item, &cma_dev->id_list);

	trace_cm_id_attach(id_priv, cma_dev->device);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	_cma_attach_to_dev(id_priv, cma_dev);
	id_priv->gid_type =
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];
}
static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del_init(&id_priv->device_item);
	cma_dev_put(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	id_priv->id.device = NULL;
	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
	}
	mutex_unlock(&lock);
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}
static int cma_set_default_qkey(struct rdma_id_private *id_priv)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	if (!qkey ||
	    (id_priv->qkey && (id_priv->qkey != qkey)))
		return -EINVAL;

	id_priv->qkey = qkey;
	return 0;
}
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret = 0;

	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
	}

	return ret;
}
static const struct ib_gid_attr *
cma_validate_port(struct ib_device *device, u32 port,
		  enum ib_gid_type gid_type,
		  union ib_gid *gid,
		  struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV);
	int bound_if_index = dev_addr->bound_dev_if;
	int dev_type = dev_addr->dev_type;
	struct net_device *ndev = NULL;

	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
		goto out;

	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		goto out;

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		goto out;

	/*
	 * For drivers that do not associate more than one net device with
	 * their gid tables, such as iWARP drivers, it is sufficient to
	 * return the first table entry.
	 *
	 * Other driver classes might be included in the future.
	 */
	if (rdma_protocol_iwarp(device, port)) {
		sgid_attr = rdma_get_gid_attr(device, port, 0);
		if (IS_ERR(sgid_attr))
			goto out;

		rcu_read_lock();
		ndev = rcu_dereference(sgid_attr->ndev);
		if (!net_eq(dev_net(ndev), dev_addr->net) ||
		    ndev->ifindex != bound_if_index) {
			rdma_put_gid_attr(sgid_attr);
			sgid_attr = ERR_PTR(-ENODEV);
		}
		rcu_read_unlock();
		goto out;
	}

	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
		ndev = dev_get_by_index(dev_addr->net, bound_if_index);
		if (!ndev)
			goto out;
	} else {
		gid_type = IB_GID_TYPE_IB;
	}

	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
	dev_put(ndev);
out:
	return sgid_attr;
}
static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
			       const struct ib_gid_attr *sgid_attr)
{
	WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
	id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}
/**
 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
 * based on source ip address.
 * @id_priv:	cm_id which should be bound to cma device
 *
 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
 * based on source IP address. It returns 0 on success or error code otherwise.
 * It is applicable to active and passive side cm_id.
 */
static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	union ib_gid gid, iboe_gid, *gidp;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	u32 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;
			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, gidp, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				cma_attach_to_dev(id_priv, cma_dev);
				ret = 0;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&lock);
	return ret;
}
/**
 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
 * @id_priv:		cm id to bind to cma device
 * @listen_id_priv:	listener cm id to match against
 * @req:		Pointer to req structure containing incoming
 *			request information
 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
 * rdma device matches for listen_id and incoming request. It also verifies
 * that a GID table entry is present for the source address.
 * Returns 0 on success, or returns error code otherwise.
 */
static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv,
			      struct cma_req_info *req)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	enum ib_gid_type gid_type;
	union ib_gid gid;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	if (rdma_protocol_roce(req->device, req->port))
		rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
			    &gid);
	else
		memcpy(&gid, dev_addr->src_dev_addr +
		       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
	sgid_attr = cma_validate_port(req->device, req->port,
				      gid_type, &gid, id_priv);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	id_priv->id.port_num = req->port;
	cma_bind_sgid_attr(id_priv, sgid_attr);
	/* Need to acquire lock to protect against reader
	 * of cma_dev->id_list such as cma_netdev_callback() and
	 * cma_process_remove().
	 */
	mutex_lock(&lock);
	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
	mutex_unlock(&lock);
	rdma_restrack_add(&id_priv->res);
	return 0;
}
static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	union ib_gid gid;
	u32 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	mutex_lock(&lock);

	cma_dev = listen_id_priv->cma_dev;
	port = listen_id_priv->id.port_num;
	gid_type = listen_id_priv->gid_type;
	sgid_attr = cma_validate_port(cma_dev->device, port,
				      gid_type, &gid, id_priv);
	if (!IS_ERR(sgid_attr)) {
		id_priv->id.port_num = port;
		cma_bind_sgid_attr(id_priv, sgid_attr);
		ret = 0;
		goto out;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			if (listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;

			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, &gid, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				ret = 0;
				goto out;
			}
		}
	}

out:
	if (!ret) {
		cma_attach_to_dev(id_priv, cma_dev);
		rdma_restrack_add(&id_priv->res);
	}

	mutex_unlock(&lock);
	return ret;
}
/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	unsigned int p;
	u16 pkey, index;
	enum ib_port_state port_state;
	int ret;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		rdma_for_each_port (cur_dev->device, p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;

			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
				continue;

			for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
			     ++i) {
				ret = rdma_query_gid(cur_dev->device, p, i,
						     &gid);
				if (ret)
					continue;

				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
				    dgid->global.subnet_prefix) &&
				    port_state == IB_PORT_ACTIVE) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
				}
			}
		}
	}

	if (!cma_dev) {
		mutex_unlock(&lock);
		return -ENODEV;
	}

found:
	cma_attach_to_dev(id_priv, cma_dev);
	rdma_restrack_add(&id_priv->res);
	mutex_unlock(&lock);
	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}
static void cma_id_get(struct rdma_id_private *id_priv)
{
	refcount_inc(&id_priv->refcount);
}

static void cma_id_put(struct rdma_id_private *id_priv)
{
	if (refcount_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}
static struct rdma_id_private *
__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
		 void *context, enum rdma_ucm_port_space ps,
		 enum ib_qp_type qp_type, const struct rdma_id_private *parent)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	id_priv->tos_set = false;
	id_priv->timeout_set = false;
	id_priv->min_rnr_timer_set = false;
	id_priv->gid_type = IB_GID_TYPE_IB;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	refcount_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->device_item);
	INIT_LIST_HEAD(&id_priv->id_list_entry);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = get_net(net);
	id_priv->seq_num &= 0x00ffffff;

	rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
	if (parent)
		rdma_restrack_parent_name(&id_priv->res, &parent->res);

	return id_priv;
}
struct rdma_cm_id *
__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
			void *context, enum rdma_ucm_port_space ps,
			enum ib_qp_type qp_type, const char *caller)
{
	struct rdma_id_private *ret;

	ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
	if (IS_ERR(ret))
		return ERR_CAST(ret);

	rdma_restrack_set_name(&ret->res, caller);
	return &ret->id;
}
EXPORT_SYMBOL(__rdma_create_kernel_id);
struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
				       void *context,
				       enum rdma_ucm_port_space ps,
				       enum ib_qp_type qp_type)
{
	struct rdma_id_private *ret;

	ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
			       ps, qp_type, NULL);
	if (IS_ERR(ret))
		return ERR_CAST(ret);

	rdma_restrack_set_name(&ret->res, NULL);
	return &ret->id;
}
EXPORT_SYMBOL(rdma_create_user_id);
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}
static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device) {
		ret = -EINVAL;
		goto out_err;
	}

	qp_init_attr->port_num = id->port_num;
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto out_err;
	}

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto out_destroy;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
	return 0;
out_destroy:
	ib_destroy_qp(qp);
out_err:
	trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
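
/*
 * Usage sketch (hypothetical, guarded out): a kernel ULP typically
 * creates the QP once the route is resolved, often sharing one CQ
 * between send and receive. The sizes below are arbitrary illustration
 * values, not recommendations.
 */
#ifdef CMA_DOC_EXAMPLE
static int example_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
			     struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.qp_type = IB_QPT_RC,
		.cap = {
			.max_send_wr = 16,
			.max_recv_wr = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};

	return rdma_create_qp(id, pd, &attr);
}
#endif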
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	trace_cm_qp_destroy(id_priv);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
		pkey = 0xffff;
	else
		pkey = ib_addr_get_pkey(dev_addr);

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_default_qkey(id_priv);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		qp_attr->port_num = id_priv->id.port_num;
		*qp_attr_mask |= IB_QP_PORT;
	} else {
		ret = -ENOSYS;
	}

	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
		qp_attr->timeout = id_priv->timeout;

	if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
		qp_attr->min_rnr_timer = id_priv->min_rnr_timer;

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
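
/*
 * Usage sketch (hypothetical, guarded out): ULPs that drive their own
 * QP transitions query the attributes for each target state and apply
 * them with ib_modify_qp(), mirroring what cma_init_conn_qp() does for
 * the INIT state.
 */
#ifdef CMA_DOC_EXAMPLE
static int example_move_qp_to_init(struct rdma_cm_id *id, struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_INIT };
	int mask, ret;

	ret = rdma_init_qp_attr(id, &attr, &mask);
	if (ret)
		return ret;
	return ib_modify_qp(qp, &attr, mask);
}
#endif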
static inline bool cma_zero_addr(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
	default:
		return false;
	}
}

static inline bool cma_loopback_addr(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *)addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(
			&((struct sockaddr_ib *)addr)->sib_addr);
	default:
		return false;
	}
}

static inline bool cma_any_addr(const struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *)dst)->sin_addr.s_addr;
	case AF_INET6: {
		struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
		struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
		bool link_local;

		if (ipv6_addr_cmp(&src_addr6->sin6_addr,
				  &dst_addr6->sin6_addr))
			return 1;
		link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
			     IPV6_ADDR_LINKLOCAL;
		/* Link local must match their scope_ids */
		return link_local ? (src_addr6->sin6_scope_id !=
				     dst_addr6->sin6_scope_id) :
				    0;
	}

	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}
static __be16 cma_port(const struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

static inline int cma_any_port(const struct sockaddr *addr)
{
	return !cma_port(addr);
}
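
/*
 * Worked example (comment only): for AF_IB the port lives in the low 16
 * bits of the masked service ID. With sib_sid ending in 0x115c and a
 * sib_sid_mask whose low 16 bits are 0xffff, cma_port() returns
 * htons(0x115c), i.e. port 4444 in host order.
 */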
static void cma_save_ib_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	if (src_addr) {
		ib = (struct sockaddr_ib *)src_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = path->service_id;
			ib->sib_scope_id = 0;
		} else {
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		}
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	}
	if (dst_addr) {
		ib = (struct sockaddr_ib *)dst_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);
		}
	}
}
static void cma_save_ip4_info(struct sockaddr_in *src_addr,
			      struct sockaddr_in *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,
		};
	}
}

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
			      struct sockaddr_in6 *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		};
	}
}
static u16 cma_port_from_service_id(__be64 service_id)
{
	return (u16)be64_to_cpu(service_id);
}
static int cma_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    const struct ib_cm_event *ib_event,
			    __be64 service_id)
{
	struct cma_hdr *hdr;
	__be16 port;

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	port = htons(cma_port_from_service_id(service_id));

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info((struct sockaddr_in *)src_addr,
				  (struct sockaddr_in *)dst_addr, hdr, port);
		break;
	case 6:
		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
				  (struct sockaddr_in6 *)dst_addr, hdr, port);
		break;
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}
static int cma_save_net_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct ib_cm_event *ib_event,
			     sa_family_t sa_family, __be64 service_id)
{
	if (sa_family == AF_IB) {
		if (ib_event->event == IB_CM_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id,
					 ib_event->param.req_rcvd.primary_path);
		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
		return 0;
	}

	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}
static int cma_save_req_info(const struct ib_cm_event *ib_event,
			     struct cma_req_info *req)
{
	const struct ib_cm_req_event_param *req_param =
		&ib_event->param.req_rcvd;
	const struct ib_cm_sidr_req_event_param *sidr_param =
		&ib_event->param.sidr_req_rcvd;

	switch (ib_event->event) {
	case IB_CM_REQ_RECEIVED:
		req->device	= req_param->listen_id->device;
		req->port	= req_param->port;
		memcpy(&req->local_gid, &req_param->primary_path->sgid,
		       sizeof(req->local_gid));
		req->has_gid	= true;
		req->service_id = req_param->primary_path->service_id;
		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
		if (req->pkey != req_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    req_param->bth_pkey, req->pkey);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		req->device	= sidr_param->listen_id->device;
		req->port	= sidr_param->port;
		req->has_gid	= false;
		req->service_id	= sidr_param->service_id;
		req->pkey	= sidr_param->pkey;
		if (req->pkey != sidr_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    sidr_param->bth_pkey, req->pkey);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static bool validate_ipv4_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in *dst_addr,
				  const struct sockaddr_in *src_addr)
{
	__be32 daddr = dst_addr->sin_addr.s_addr,
	       saddr = src_addr->sin_addr.s_addr;
	struct fib_result res;
	struct flowi4 fl4;
	int err;
	bool ret;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
	    ipv4_is_loopback(saddr))
		return false;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = net_dev->ifindex;
	fl4.daddr = daddr;
	fl4.saddr = saddr;

	rcu_read_lock();
	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
	rcu_read_unlock();

	return ret;
}
static bool validate_ipv6_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in6 *dst_addr,
				  const struct sockaddr_in6 *src_addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
			   IPV6_ADDR_LINKLOCAL;
	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
					 &src_addr->sin6_addr, net_dev->ifindex,
					 NULL, strict);
	bool ret;

	if (!rt)
		return false;

	ret = rt->rt6i_idev->dev == net_dev;
	ip6_rt_put(rt);

	return ret;
#else
	return false;
#endif
}
static bool validate_net_dev(struct net_device *net_dev,
			     const struct sockaddr *daddr,
			     const struct sockaddr *saddr)
{
	const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
	const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
	const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;

	switch (daddr->sa_family) {
	case AF_INET:
		return saddr->sa_family == AF_INET &&
		       validate_ipv4_net_dev(net_dev, daddr4, saddr4);

	case AF_INET6:
		return saddr->sa_family == AF_INET6 &&
		       validate_ipv6_net_dev(net_dev, daddr6, saddr6);

	default:
		return false;
	}
}
static struct net_device *
roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
{
	const struct ib_gid_attr *sgid_attr = NULL;
	struct net_device *ndev;

	if (ib_event->event == IB_CM_REQ_RECEIVED)
		sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
	else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
		sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;

	if (!sgid_attr)
		return NULL;

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
	if (IS_ERR(ndev))
		ndev = NULL;
	else
		dev_hold(ndev);
	rcu_read_unlock();
	return ndev;
}
static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
					  struct cma_req_info *req)
{
	struct sockaddr *listen_addr =
			(struct sockaddr *)&req->listen_addr_storage;
	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
	struct net_device *net_dev;
	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
	int err;

	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
			       req->service_id);
	if (err)
		return ERR_PTR(err);

	if (rdma_protocol_roce(req->device, req->port))
		net_dev = roce_get_net_dev_by_cm_event(ib_event);
	else
		net_dev = ib_get_net_dev_by_params(req->device, req->port,
						   req->pkey,
						   gid, listen_addr);
	if (!net_dev)
		return ERR_PTR(-ENODEV);

	return net_dev;
}
static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
{
	return (be64_to_cpu(service_id) >> 16) & 0xffff;
}
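
/*
 * Worked example (comment only): an RDMA CM service ID encodes the port
 * space in bits 31:16 and the port number in bits 15:0. For a TCP port
 * space listen, be64_to_cpu(service_id) & 0xffff recovers the port
 * (cma_port_from_service_id()) while (be64_to_cpu(service_id) >> 16) &
 * 0xffff recovers RDMA_PS_TCP (rdma_ps_from_service_id()).
 */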
static bool cma_match_private_data(struct rdma_id_private *id_priv,
				   const struct cma_hdr *hdr)
{
	struct sockaddr *addr = cma_src_addr(id_priv);
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	if (cma_any_addr(addr) && !id_priv->afonly)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (cma_get_ip_ver(hdr) != 4)
			return false;
		if (!cma_any_addr(addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
			return false;
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (cma_get_ip_ver(hdr) != 6)
			return false;
		if (!cma_any_addr(addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
			return false;
		break;
	default:
		return false;
	}

	return true;
}
static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
	struct ib_device *device = id->device;
	const u32 port_num = id->port_num ?: rdma_start_port(device);

	return rdma_protocol_roce(device, port_num);
}
static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
{
	const struct sockaddr *daddr =
			(const struct sockaddr *)&req->listen_addr_storage;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;

	/* Returns true if the req is for IPv6 link local */
	return (daddr->sa_family == AF_INET6 &&
		(ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
}
static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
			      const struct cma_req_info *req)
{
	const struct rdma_addr *addr = &id->route.addr;

	if (!net_dev)
		/* This request is an AF_IB request */
		return (!id->port_num || id->port_num == req->port) &&
		       (addr->src_addr.ss_family == AF_IB);

	/*
	 * If the request is not for IPv6 link local, allow matching
	 * request to any netdevice of the one or multiport rdma device.
	 */
	if (!cma_is_req_ipv6_ll(req))
		return true;
	/*
	 * Net namespaces must match, and if the listener is listening
	 * on a specific netdevice then netdevice must match as well.
	 */
	if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
	    (!!addr->dev_addr.bound_dev_if ==
	     (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
		return true;
	else
		return false;
}
static struct rdma_id_private *cma_find_listener(
		const struct rdma_bind_list *bind_list,
		const struct ib_cm_id *cm_id,
		const struct ib_cm_event *ib_event,
		const struct cma_req_info *req,
		const struct net_device *net_dev)
{
	struct rdma_id_private *id_priv, *id_priv_dev;

	lockdep_assert_held(&lock);

	if (!bind_list)
		return ERR_PTR(-EINVAL);

	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
		if (cma_match_private_data(id_priv, ib_event->private_data)) {
			if (id_priv->id.device == cm_id->device &&
			    cma_match_net_dev(&id_priv->id, net_dev, req))
				return id_priv;
			list_for_each_entry(id_priv_dev,
					    &id_priv->listen_list,
					    listen_item) {
				if (id_priv_dev->id.device == cm_id->device &&
				    cma_match_net_dev(&id_priv_dev->id,
						      net_dev, req))
					return id_priv_dev;
			}
		}
	}

	return ERR_PTR(-EINVAL);
}
static struct rdma_id_private *
cma_ib_id_from_event(struct ib_cm_id *cm_id,
		     const struct ib_cm_event *ib_event,
		     struct cma_req_info *req,
		     struct net_device **net_dev)
{
	struct rdma_bind_list *bind_list;
	struct rdma_id_private *id_priv;
	int err;

	err = cma_save_req_info(ib_event, req);
	if (err)
		return ERR_PTR(err);

	*net_dev = cma_get_net_dev(ib_event, req);
	if (IS_ERR(*net_dev)) {
		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
			/* Assuming the protocol is AF_IB */
			*net_dev = NULL;
		} else {
			return ERR_CAST(*net_dev);
		}
	}

	mutex_lock(&lock);
	/*
	 * Net namespace might be getting deleted while route lookup,
	 * cm_id lookup is in progress. Therefore, perform netdevice
	 * validation, cm_id lookup under rcu lock.
	 * RCU lock along with netdevice state check, synchronizes with
	 * netdevice migrating to different net namespace and also avoids
	 * case where net namespace doesn't get deleted while lookup is in
	 * progress.
	 * If the device state is not IFF_UP, its properties such as ifindex
	 * and nd_net cannot be trusted to remain valid without rcu lock.
	 * net/core/dev.c change_net_namespace() ensures to synchronize with
	 * ongoing operations on net device after device is closed using
	 * synchronize_net().
	 */
	rcu_read_lock();
	if (*net_dev) {
		/*
		 * If netdevice is down, it is likely that it is administratively
		 * down or it might be migrating to different namespace.
		 * In that case avoid further processing, as the net namespace
		 * or ifindex may change.
		 */
		if (((*net_dev)->flags & IFF_UP) == 0) {
			id_priv = ERR_PTR(-EHOSTUNREACH);
			goto err;
		}

		if (!validate_net_dev(*net_dev,
				 (struct sockaddr *)&req->src_addr_storage,
				 (struct sockaddr *)&req->listen_addr_storage)) {
			id_priv = ERR_PTR(-EHOSTUNREACH);
			goto err;
		}
	}

	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
				rdma_ps_from_service_id(req->service_id),
				cma_port_from_service_id(req->service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
err:
	rcu_read_unlock();
	mutex_unlock(&lock);
	if (IS_ERR(id_priv) && *net_dev) {
		dev_put(*net_dev);
		*net_dev = NULL;
	}
	return id_priv;
}
static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
{
	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
	}
}
static void _cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	lockdep_assert_held(&lock);

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	list_del_init(&id_priv->listen_any_item);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv =
			list_first_entry(&id_priv->listen_list,
					 struct rdma_id_private, listen_item);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->device_item);
		list_del_init(&dev_id_priv->listen_item);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	_cma_cancel_listens(id_priv);
	mutex_unlock(&lock);
}
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		/*
		 * We can avoid doing the rdma_addr_cancel() based on state,
		 * only RDMA_CM_ADDR_QUERY has a work that could still execute.
		 * Notice that the addr_handler work could still be exiting
		 * outside this state, however due to the interaction with the
		 * handler_mutex the work is guaranteed not to touch id_priv
		 * during exit.
		 */
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		cma_ps_remove(net, bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}
static void destroy_mc(struct rdma_id_private *id_priv,
		       struct cma_multicast *mc)
{
	bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
		ib_sa_free_multicast(mc->sa_mc);

	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
		struct rdma_dev_addr *dev_addr =
			&id_priv->id.route.addr.dev_addr;
		struct net_device *ndev = NULL;

		if (dev_addr->bound_dev_if)
			ndev = dev_get_by_index(dev_addr->net,
						dev_addr->bound_dev_if);
		if (ndev && !send_only) {
			enum ib_gid_type gid_type;
			union ib_gid mgid;

			gid_type = id_priv->cma_dev->default_gid_type
					   [id_priv->id.port_num -
					    rdma_start_port(
						    id_priv->cma_dev->device)];
			cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
					  gid_type);
			cma_igmp_send(ndev, &mgid, false);
		}
		dev_put(ndev);

		cancel_work_sync(&mc->iboe_join.work);
	}
	kfree(mc);
}
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
				      list);
		list_del(&mc->list);
		destroy_mc(id_priv, mc);
	}
}
static void _destroy_id(struct rdma_id_private *id_priv,
			enum rdma_cm_state state)
{
	cma_cancel_operation(id_priv, state);

	rdma_restrack_del(&id_priv->res);
	cma_remove_id_from_tree(id_priv);
	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_id_put(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_id_put(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv->id.route.path_rec_inbound);
	kfree(id_priv->id.route.path_rec_outbound);

	put_net(id_priv->id.route.addr.dev_addr.net);
	kfree(id_priv);
}
/*
 * destroy an ID from within the handler_mutex. This ensures that no other
 * handlers can start running concurrently.
 */
static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
	__releases(&id_priv->handler_mutex)
{
	enum rdma_cm_state state;
	unsigned long flags;

	trace_cm_id_destroy(id_priv);

	/*
	 * Setting the state to destroyed under the handler mutex provides a
	 * fence against calling handler callbacks. If this is invoked due to
	 * the failure of a handler callback then it guarantees that no future
	 * handlers will be called.
	 */
	lockdep_assert_held(&id_priv->handler_mutex);
	spin_lock_irqsave(&id_priv->lock, flags);
	state = id_priv->state;
	id_priv->state = RDMA_CM_DESTROYING;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	mutex_unlock(&id_priv->handler_mutex);
	_destroy_id(id_priv, state);
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	mutex_lock(&id_priv->handler_mutex);
	destroy_id_handler_unlock(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
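
/*
 * Usage sketch (hypothetical, guarded out): rdma_destroy_id() must not
 * be called from inside the id's own event handler; returning non-zero
 * from the handler requests destruction instead, as cma_ib_handler()
 * below demonstrates for the kernel side. Outside handler context a ULP
 * tears down in QP-then-id order.
 */
#ifdef CMA_DOC_EXAMPLE
static void example_teardown(struct rdma_cm_id *id)
{
	if (id->qp)
		rdma_destroy_qp(id);
	rdma_destroy_id(id);
}
#endif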
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	trace_cm_send_rtu(id_priv);
	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
	cma_modify_qp_err(id_priv);
	trace_cm_send_rej(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}
static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   const struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;

	event->ece.vendor_id = rep_data->ece.vendor_id;
	event->ece.attr_mod = rep_data->ece.attr_mod;
}
static int cma_cm_event_handler(struct rdma_id_private *id_priv,
				struct rdma_cm_event *event)
{
	int ret;

	lockdep_assert_held(&id_priv->handler_mutex);

	trace_cm_event_handler(id_priv, event);
	ret = id_priv->id.event_handler(&id_priv->id, event);
	trace_cm_event_done(id_priv, event, ret);
	return ret;
}
static int cma_ib_handler(struct ib_cm_id *cm_id,
			  const struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event = {};
	enum rdma_cm_state state;
	int ret;

	mutex_lock(&id_priv->handler_mutex);
	state = READ_ONCE(id_priv->state);
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     state != RDMA_CM_CONNECT) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     state != RDMA_CM_DISCONNECT))
		goto out;

	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		if (state == RDMA_CM_CONNECT &&
		    (id_priv->id.qp_type != IB_QPT_UD)) {
			trace_cm_send_mra(id_priv);
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		}
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else {
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		}
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT;
		fallthrough;
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
										ib_event->param.rej_rcvd.reason));
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = cma_cm_event_handler(id_priv, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		destroy_id_handler_unlock(id_priv);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
static struct rdma_id_private *
cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
		   const struct ib_cm_event *ib_event,
		   struct net_device *net_dev)
{
	struct rdma_id_private *listen_id_priv;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
	const __be64 service_id =
		ib_event->param.req_rcvd.primary_path->service_id;
	int ret;

	listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
	id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net,
				   listen_id->event_handler, listen_id->context,
				   listen_id->ps,
				   ib_event->param.req_rcvd.qp_type,
				   listen_id_priv);
	if (IS_ERR(id_priv))
		return NULL;

	id = &id_priv->id;
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family, service_id))
		goto err;

	rt = &id->route;
	rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc_array(rt->num_pri_alt_paths,
				     sizeof(*rt->path_rec), GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *path;
	if (rt->num_pri_alt_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (net_dev) {
		rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
	} else {
		if (!cma_protocol_roce(listen_id) &&
		    cma_any_addr(cma_src_addr(id_priv))) {
			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
			if (ret)
				goto err;
		}
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}
static struct rdma_id_private *
cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
		  const struct ib_cm_event *ib_event,
		  struct net_device *net_dev)
{
	const struct rdma_id_private *listen_id_priv;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct net *net = listen_id->route.addr.dev_addr.net;
	int ret;

	listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
	id_priv = __rdma_create_id(net, listen_id->event_handler,
				   listen_id->context, listen_id->ps, IB_QPT_UD,
				   listen_id_priv);
	if (IS_ERR(id_priv))
		return NULL;

	id = &id_priv->id;
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family,
			      ib_event->param.sidr_req_rcvd.service_id))
		goto err;

	if (net_dev) {
		rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
	} else {
		if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv),
						 &id->route.addr.dev_addr);
			if (ret)
				goto err;
		}
	}

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   const struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;

	event->ece.vendor_id = req_data->ece.vendor_id;
	event->ece.attr_mod = req_data->ece.attr_mod;
}

static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
				    const struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
}

static int cma_ib_req_handler(struct ib_cm_id *cm_id,
			      const struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id = NULL;
	struct rdma_cm_event event = {};
	struct cma_req_info req = {};
	struct net_device *net_dev;
	u8 offset;
	int ret;

	listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
	if (IS_ERR(listen_id))
		return PTR_ERR(listen_id);

	trace_cm_req_handler(listen_id, ib_event->event);
	if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
		ret = -EINVAL;
		goto net_dev_put;
	}

	mutex_lock(&listen_id->handler_mutex);
	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
		ret = -ECONNABORTED;
		goto err_unlock;
	}

	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
	if (ret) {
		destroy_id_handler_unlock(conn_id);
		goto err_unlock;
	}

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = cma_cm_event_handler(conn_id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		conn_id->cm_id.ib = NULL;
		mutex_unlock(&listen_id->handler_mutex);
		destroy_id_handler_unlock(conn_id);
		goto net_dev_put;
	}

	if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
	    conn_id->id.qp_type != IB_QPT_UD) {
		trace_cm_send_mra(cm_id->context);
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	}
	mutex_unlock(&conn_id->handler_mutex);

err_unlock:
	mutex_unlock(&listen_id->handler_mutex);

net_dev_put:
	if (net_dev)
		dev_put(net_dev);

	return ret;
}

__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;

	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);

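/*
 * Editor's illustration (not part of the original file): for IP addresses the
 * service ID is just the port space shifted into the upper bits plus the port
 * number. A minimal sketch of the arithmetic, assuming RDMA_PS_TCP == 0x0106
 * and a listener bound to port 4791 (0x12b7):
 *
 *	u64 sid = ((u64)0x0106 << 16) + 4791;	// == 0x010612b7
 *
 * so an incoming IB CM REQ carrying this service ID demultiplexes back to the
 * RDMA_PS_TCP listener on port 4791.
 */
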
void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
		    union ib_gid *dgid)
{
	struct rdma_addr *addr = &cm_id->route.addr;

	if (!cm_id->device) {
		if (sgid)
			memset(sgid, 0, sizeof(*sgid));
		if (dgid)
			memset(dgid, 0, sizeof(*dgid));
		return;
	}

	if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
		if (sgid)
			rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
		if (dgid)
			rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
	} else {
		if (sgid)
			rdma_addr_get_sgid(&addr->dev_addr, sgid);
		if (dgid)
			rdma_addr_get_dgid(&addr->dev_addr, dgid);
	}
}
EXPORT_SYMBOL(rdma_read_gids);

static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event = {};
	int ret = 0;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
		goto out;

	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		memcpy(cma_src_addr(id_priv), laddr,
		       rdma_addr_size(laddr));
		memcpy(cma_dst_addr(id_priv), raddr,
		       rdma_addr_size(raddr));
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		goto out;
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = cma_cm_event_handler(id_priv, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		destroy_id_handler_unlock(id_priv);
		return ret;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event = {};
	int ret = -ECONNABORTED;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	listen_id = cm_id->context;

	mutex_lock(&listen_id->handler_mutex);
	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
		goto out;

	/* Create a new RDMA id for the new IW CM ID */
	conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
				   listen_id->id.event_handler,
				   listen_id->id.context, RDMA_PS_TCP,
				   IB_QPT_RC, listen_id);
	if (IS_ERR(conn_id)) {
		ret = PTR_ERR(conn_id);
		goto out;
	}
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;

	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
	if (ret) {
		mutex_unlock(&listen_id->handler_mutex);
		destroy_id_handler_unlock(conn_id);
		return ret;
	}

	ret = cma_iw_acquire_dev(conn_id, listen_id);
	if (ret) {
		mutex_unlock(&listen_id->handler_mutex);
		destroy_id_handler_unlock(conn_id);
		return ret;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));

	ret = cma_cm_event_handler(conn_id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		mutex_unlock(&listen_id->handler_mutex);
		destroy_id_handler_unlock(conn_id);
		return ret;
	}

	mutex_unlock(&conn_id->handler_mutex);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct ib_cm_id	*id;
	__be64 svc_id;

	addr = cma_src_addr(id_priv);
	svc_id = rdma_get_service_id(&id_priv->id, addr);
	id = ib_cm_insert_listen(id_priv->id.device,
				 cma_ib_req_handler, svc_id);
	if (IS_ERR(id))
		return PTR_ERR(id);
	id_priv->cm_id.ib = id;

	return 0;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct iw_cm_id	*id;

	id = iw_create_cm_id(id_priv->id.device,
			     iw_conn_req_handler,
			     id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	mutex_lock(&id_priv->qp_mutex);
	id->tos = id_priv->tos;
	id->tos_set = id_priv->tos_set;
	mutex_unlock(&id_priv->qp_mutex);
	id->afonly = id_priv->afonly;
	id_priv->cm_id.iw = id;

	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	/* Listening IDs are always destroyed on removal */
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		return -1;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	trace_cm_event_handler(id_priv, event);
	return id_priv->id.event_handler(id, event);
}

static int cma_listen_on_dev(struct rdma_id_private *id_priv,
			     struct cma_device *cma_dev,
			     struct rdma_id_private **to_destroy)
{
	struct rdma_id_private *dev_id_priv;
	struct net *net = id_priv->id.route.addr.dev_addr.net;
	int ret;

	lockdep_assert_held(&lock);

	*to_destroy = NULL;
	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
		return 0;

	dev_id_priv =
		__rdma_create_id(net, cma_listen_handler, id_priv,
				 id_priv->id.ps, id_priv->id.qp_type, id_priv);
	if (IS_ERR(dev_id_priv))
		return PTR_ERR(dev_id_priv);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	_cma_attach_to_dev(dev_id_priv, cma_dev);
	rdma_restrack_add(&dev_id_priv->res);
	cma_id_get(id_priv);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;
	mutex_lock(&id_priv->qp_mutex);
	dev_id_priv->tos_set = id_priv->tos_set;
	dev_id_priv->tos = id_priv->tos;
	mutex_unlock(&id_priv->qp_mutex);

	ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
	if (ret)
		goto err_listen;
	list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list);
	return 0;

err_listen:
	/* Caller must destroy this after releasing lock */
	*to_destroy = dev_id_priv;
	dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);
	return ret;
}

static int cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *to_destroy;
	struct cma_device *cma_dev;
	int ret;

	mutex_lock(&lock);
	list_add_tail(&id_priv->listen_any_item, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
		if (ret) {
			/* Prevent racing with cma_process_remove() */
			if (to_destroy)
				list_del_init(&to_destroy->device_item);
			goto err_listen;
		}
	}
	mutex_unlock(&lock);
	return 0;

err_listen:
	_cma_cancel_listens(id_priv);
	mutex_unlock(&lock);
	if (to_destroy)
		rdma_destroy_id(&to_destroy->id);
	return ret;
}

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	id_priv->tos = (u8) tos;
	id_priv->tos_set = true;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_set_service_type);

/**
 * rdma_set_ack_timeout() - Set the ack timeout of the QP associated
 *                          with a connection identifier.
 * @id: Communication identifier to associate the ack timeout with.
 * @timeout: Ack timeout to set on the QP, expressed as 4.096 * 2^(timeout) usec.
 *
 * This function should be called before rdma_connect() on the active side,
 * and on the passive side before rdma_accept(). It is applicable to the
 * primary path only. The timeout affects only the local side of the QP; it
 * is not negotiated with the remote side, and zero disables the timer. If it
 * is set before rdma_resolve_route(), the value is also used to determine
 * the PacketLifeTime for RoCE.
 *
 * Return: 0 for success
 */
int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
{
	struct rdma_id_private *id_priv;

	if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	id_priv->timeout = timeout;
	id_priv->timeout_set = true;
	mutex_unlock(&id_priv->qp_mutex);

	return 0;
}
EXPORT_SYMBOL(rdma_set_ack_timeout);

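/*
 * Editor's note (illustration, not in the original source): the encoded
 * timeout is exponential, i.e. 4.096 usec * 2^timeout, so timeout = 14
 * gives 4.096 * 2^14 usec ~= 67 msec. A hypothetical ULP would typically
 * call this right after resolving the route and before connecting:
 *
 *	// "id" here is an rdma_cm_id handled in RDMA_CM_EVENT_ROUTE_RESOLVED
 *	if (rdma_set_ack_timeout(id, 14))
 *		pr_warn("QP type does not take an ack timeout\n");
 */
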
/**
 * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
 *			      QP associated with a connection identifier.
 * @id: Communication identifier to associate the RNR NAK timer with.
 * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
 *		   Timer Field" in the IBTA specification.
 *
 * This function should be called before rdma_connect() on the active
 * side, and on the passive side before rdma_accept(). The timer value
 * will be associated with the local QP. When the QP receives a send it
 * is not ready to handle, typically because the receive queue is empty,
 * an RNR Retry NAK is returned to the requester with the min_rnr_timer
 * encoded. The requester will then wait at least the time specified
 * in the NAK before retrying. The default is zero, which translates
 * to a minimum RNR Timer value of 655 ms.
 *
 * Return: 0 for success
 */
int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
{
	struct rdma_id_private *id_priv;

	/* It is a five-bit value */
	if (min_rnr_timer & 0xe0)
		return -EINVAL;

	if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
		return -EINVAL;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	id_priv->min_rnr_timer = min_rnr_timer;
	id_priv->min_rnr_timer_set = true;
	mutex_unlock(&id_priv->qp_mutex);

	return 0;
}
EXPORT_SYMBOL(rdma_set_min_rnr_timer);

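/*
 * Editor's note (illustration, not in the original source): min_rnr_timer is
 * the 5-bit IBTA encoding, not a number of milliseconds; the encoded value 0
 * means 655.36 msec and shorter delays use the other codes from Table 45.
 * A hypothetical passive-side sketch:
 *
 *	// before rdma_accept(), on an RC connection
 *	if (rdma_set_min_rnr_timer(id, 0))	// 0 -> 655.36 msec minimum
 *		pr_warn("invalid RNR timer encoding or QP type\n");
 */
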
static int route_set_path_rec_inbound(struct cma_work *work,
				      struct sa_path_rec *path_rec)
{
	struct rdma_route *route = &work->id->id.route;

	if (!route->path_rec_inbound) {
		route->path_rec_inbound =
			kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
		if (!route->path_rec_inbound)
			return -ENOMEM;
	}

	*route->path_rec_inbound = *path_rec;
	return 0;
}

static int route_set_path_rec_outbound(struct cma_work *work,
				       struct sa_path_rec *path_rec)
{
	struct rdma_route *route = &work->id->id.route;

	if (!route->path_rec_outbound) {
		route->path_rec_outbound =
			kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
		if (!route->path_rec_outbound)
			return -ENOMEM;
	}

	*route->path_rec_outbound = *path_rec;
	return 0;
}

static void cma_query_handler(int status, struct sa_path_rec *path_rec,
			      unsigned int num_prs, void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;
	int i;

	route = &work->id->id.route;

	if (status)
		goto fail;

	for (i = 0; i < num_prs; i++) {
		if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
			*route->path_rec = path_rec[i];
		else if (path_rec[i].flags & IB_PATH_INBOUND)
			status = route_set_path_rec_inbound(work, &path_rec[i]);
		else if (path_rec[i].flags & IB_PATH_OUTBOUND)
			status = route_set_path_rec_outbound(work,
							     &path_rec[i]);
		else
			status = -EINVAL;

		if (status)
			goto fail;
	}

	route->num_pri_alt_paths = 1;
	queue_work(cma_wq, &work->work);
	return;

fail:
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
	work->event.status = status;
	pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
			     status);
	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv,
			      unsigned long timeout_ms, struct cma_work *work)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);

	if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
		path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path_rec.rec_type = SA_PATH_REC_TYPE_IB;
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = rdma_get_service_id(&id_priv->id,
						  cma_dst_addr(id_priv));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_iboe_join_work_handler(struct work_struct *work)
{
	struct cma_multicast *mc =
		container_of(work, struct cma_multicast, iboe_join.work);
	struct rdma_cm_event *event = &mc->iboe_join.event;
	struct rdma_id_private *id_priv = mc->id_priv;
	int ret;

	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
		goto out;

	ret = cma_cm_event_handler(id_priv, event);
	WARN_ON(ret);

out:
	mutex_unlock(&id_priv->handler_mutex);
	if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
		rdma_destroy_ah_attr(&event->param.ud.ah_attr);
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;

	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
		goto out_unlock;
	if (work->old_state != 0 || work->new_state != 0) {
		if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
			goto out_unlock;
	}

	if (cma_cm_event_handler(id_priv, &work->event)) {
		cma_id_put(id_priv);
		destroy_id_handler_unlock(id_priv);
		goto out_free;
	}

out_unlock:
	mutex_unlock(&id_priv->handler_mutex);
	cma_id_put(id_priv);
out_free:
	if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
		rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
	kfree(work);
}

static void cma_init_resolve_route_work(struct cma_work *work,
					struct rdma_id_private *id_priv)
{
	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
}

static void enqueue_resolve_addr_work(struct cma_work *work,
				      struct rdma_id_private *id_priv)
{
	/* Balances with cma_id_put() in cma_work_handler */
	cma_id_get(id_priv);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	queue_work(cma_wq, &work->work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
				unsigned long timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	cma_init_resolve_route_work(work, id_priv);

	if (!route->path_rec)
		route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
					   unsigned long supported_gids,
					   enum ib_gid_type default_gid)
{
	if ((network_type == RDMA_NETWORK_IPV4 ||
	     network_type == RDMA_NETWORK_IPV6) &&
	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	return default_gid;
}

/*
 * cma_iboe_set_path_rec_l2_fields() is a helper function that sets the
 * path record type based on the GID type.
 * It also sets up the other L2 fields of the path record, including the
 * destination MAC address and netdev ifindex.
 * It returns the netdev of the bound interface for this path record entry.
 */
static struct net_device *
cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
	struct rdma_addr *addr = &route->addr;
	unsigned long supported_gids;
	struct net_device *ndev;

	if (!addr->dev_addr.bound_dev_if)
		return NULL;

	ndev = dev_get_by_index(addr->dev_addr.net,
				addr->dev_addr.bound_dev_if);
	if (!ndev)
		return NULL;

	supported_gids = roce_gid_type_mask_support(id_priv->id.device,
						    id_priv->id.port_num);
	gid_type = cma_route_gid_type(addr->dev_addr.network,
				      supported_gids,
				      id_priv->gid_type);
	/* Use the hint from IP Stack to select GID Type */
	if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
		gid_type = ib_network_to_gid_type(addr->dev_addr.network);
	route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);

	route->path_rec->roce.route_resolved = true;
	sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
	return ndev;
}

int rdma_set_ib_path(struct rdma_cm_id *id,
		     struct sa_path_rec *path_rec)
{
	struct rdma_id_private *id_priv;
	struct net_device *ndev;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	if (rdma_protocol_roce(id->device, id->port_num)) {
		ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
		if (!ndev) {
			ret = -ENODEV;
			goto err_free;
		}
		dev_put(ndev);
	}

	id->route.num_pri_alt_paths = 1;
	return 0;

err_free:
	kfree(id->route.path_rec);
	id->route.path_rec = NULL;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_path);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	cma_init_resolve_route_work(work, id_priv);
	queue_work(cma_wq, &work->work);
	return 0;
}

static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
{
	struct net_device *dev;

	dev = vlan_dev_real_dev(vlan_ndev);
	if (dev->num_tc)
		return netdev_get_prio_tc_map(dev, prio);

	return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
		VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}

struct iboe_prio_tc_map {
	int input_prio;
	int output_tc;
	bool found;
};

static int get_lower_vlan_dev_tc(struct net_device *dev,
				 struct netdev_nested_priv *priv)
{
	struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;

	if (is_vlan_dev(dev))
		map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
	else if (dev->num_tc)
		map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
	else
		return 0; /* Continue looking */
	map->found = true;

	/* We are interested only in first level VLAN device, so always
	 * return 1 to stop iterating over next level devices.
	 */
	return 1;
}

static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	struct iboe_prio_tc_map prio_tc_map = {};
	int prio = rt_tos2priority(tos);
	struct netdev_nested_priv priv;

	/* If VLAN device, get it directly from the VLAN netdev */
	if (is_vlan_dev(ndev))
		return get_vlan_ndev_tc(ndev, prio);

	prio_tc_map.input_prio = prio;
	priv.data = (void *)&prio_tc_map;
	rcu_read_lock();
	netdev_walk_all_lower_dev_rcu(ndev,
				      get_lower_vlan_dev_tc,
				      &priv);
	rcu_read_unlock();

	/* If map is found from lower device, use it; Otherwise
	 * continue with the current netdevice to get priority to tc map.
	 */
	if (prio_tc_map.found)
		return prio_tc_map.output_tc;
	else if (ndev->num_tc)
		return netdev_get_prio_tc_map(ndev, prio);
	else
		return 0;
}

static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
{
	struct sockaddr_in6 *addr6;
	u16 dport, sport;
	u32 hash, fl;

	addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
	fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
	if ((cma_family(id_priv) != AF_INET6) || !fl) {
		dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
		sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
		hash = (u32)sport * 31 + dport;
		fl = hash & IB_GRH_FLOWLABEL_MASK;
	}

	return cpu_to_be32(fl);
}

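/*
 * Editor's illustration (not part of the original file): when the source
 * address carries no IPv6 flow label, the 20-bit label is derived from the
 * ports. E.g. sport = 4660 (0x1234) and dport = 4791 give
 *
 *	hash = 4660 * 31 + 4791 = 149251 = 0x24703
 *	fl   = 0x24703 & IB_GRH_FLOWLABEL_MASK = 0x24703
 *
 * assuming IB_GRH_FLOWLABEL_MASK is the 20-bit mask 0xfffff; the label then
 * feeds RoCEv2 UDP source-port selection so distinct flows get entropy.
 */
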
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev;

	u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
					rdma_start_port(id_priv->cma_dev->device)];
	u8 tos;

	mutex_lock(&id_priv->qp_mutex);
	tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
	mutex_unlock(&id_priv->qp_mutex);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_pri_alt_paths = 1;

	ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &route->path_rec->sgid);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
		    &route->path_rec->dgid);

	if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
		/* TODO: get the hoplimit from the inet/inet6 device */
		route->path_rec->hop_limit = addr->dev_addr.hoplimit;
	else
		route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
	route->path_rec->traffic_class = tos;
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = IB_RATE_PORT_CURRENT;
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	/* In case ACK timeout is set, use this value to calculate
	 * PacketLifeTime.  As per IBTA 12.7.34,
	 * local ACK timeout = (2 * PacketLifeTime + Local CA's ACK delay).
	 * Assuming a negligible local ACK delay, we can use
	 * PacketLifeTime = local ACK timeout/2
	 * as a reasonable approximation for RoCE networks.
	 */
	mutex_lock(&id_priv->qp_mutex);
	if (id_priv->timeout_set && id_priv->timeout)
		route->path_rec->packet_life_time = id_priv->timeout - 1;
	else
		route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	mutex_unlock(&id_priv->qp_mutex);

	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	if (rdma_protocol_roce_udp_encap(id_priv->id.device,
					 id_priv->id.port_num))
		route->path_rec->flow_label =
			cma_get_roce_udp_flow_label(id_priv);

	cma_init_resolve_route_work(work, id_priv);
	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
	route->num_pri_alt_paths = 0;
err1:
	kfree(work);
	return ret;
}

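/*
 * Editor's worked example (not part of the original file): both fields use
 * the same exponential encoding (4.096 usec * 2^v), so halving a time means
 * subtracting one from the exponent. With an ACK timeout of 16 (~268 msec),
 * packet_life_time = 16 - 1 = 15 encodes ~134 msec, i.e. exactly the
 * "local ACK timeout / 2" the comment above derives from IBTA 12.7.34.
 */
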
int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (!timeout_ms)
		return -EINVAL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	cma_id_get(id_priv);
	if (rdma_cap_ib_sa(id->device, id->port_num))
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
	else if (rdma_protocol_roce(id->device, id->port_num)) {
		ret = cma_resolve_iboe_route(id_priv);
		if (!ret)
			cma_add_id_to_tree(id_priv);
	}
	else if (rdma_protocol_iwarp(id->device, id->port_num))
		ret = cma_resolve_iw_route(id_priv);
	else
		ret = -ENOSYS;

	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_id_put(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

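/*
 * Editor's sketch (not in the original source): a typical active-side ULP
 * drives this state machine from its event handler. Hypothetical outline,
 * error handling elided:
 *
 *	rdma_resolve_addr(id, NULL, dst, 2000);
 *	// handler receives RDMA_CM_EVENT_ADDR_RESOLVED, then calls:
 *	rdma_resolve_route(id, 2000);
 *	// handler receives RDMA_CM_EVENT_ROUTE_RESOLVED, then it may
 *	// create a QP and call rdma_connect_locked() from the handler.
 */
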
static void cma_set_loopback(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		break;
	case AF_INET6:
		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
			      0, 0, 0, htonl(1));
		break;
	default:
		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
			    0, 0, 0, htonl(1));
		break;
	}
}

static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	union ib_gid gid;
	enum ib_port_state port_state;
	unsigned int p;
	u16 pkey;
	int ret;

	cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    !rdma_cap_ib_cm(cur_dev->device, 1))
			continue;

		if (!cma_dev)
			cma_dev = cur_dev;

		rdma_for_each_port (cur_dev->device, p) {
			if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
			    port_state == IB_PORT_ACTIVE) {
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}

	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}

	p = 1;

port_found:
	ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_protocol_ib(cma_dev->device, p)) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	rdma_restrack_add(&id_priv->res);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}

static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event = {};
	struct sockaddr *addr;
	struct sockaddr_storage old_addr;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	/*
	 * Store the previous src address, so that if we fail to acquire
	 * matching rdma device, old address can be restored back, which helps
	 * to cancel the cma listen operation correctly.
	 */
	addr = cma_src_addr(id_priv);
	memcpy(&old_addr, addr, rdma_addr_size(addr));
	memcpy(addr, src_addr, rdma_addr_size(src_addr));
	if (!status && !id_priv->cma_dev) {
		status = cma_acquire_dev_by_src_ip(id_priv);
		if (status)
			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
					     status);
		rdma_restrack_add(&id_priv->res);
	} else if (status) {
		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
	}

	if (status) {
		memcpy(addr, &old_addr,
		       rdma_addr_size((struct sockaddr *)&old_addr));
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (cma_cm_event_handler(id_priv, &event)) {
		destroy_id_handler_unlock(id_priv);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	enqueue_resolve_addr_work(work, id_priv);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_resolve_ib_dev(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));

	enqueue_resolve_addr_work(work, id_priv);
	return 0;
err:
	kfree(work);
	return ret;
}

int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
	    id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);

int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);

static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct sockaddr_ib *sib;
	u64 sid, mask;
	__be16 port;

	lockdep_assert_held(&lock);

	addr = cma_src_addr(id_priv);
	port = htons(bind_list->port);

	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_port = port;
		break;
	case AF_INET6:
		((struct sockaddr_in6 *) addr)->sin6_port = port;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		sid = be64_to_cpu(sib->sib_sid);
		mask = be64_to_cpu(sib->sib_sid_mask);
		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
		sib->sib_sid_mask = cpu_to_be64(~0ULL);
		break;
	}
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

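/*
 * Editor's illustration (not part of the original file): for AF_IB the low
 * 16 bits of the service ID carry the port. E.g. with the TCP-over-IB
 * service ID prefix sid = 0x0000000001060000 and mask = 0xffffffffffff0000,
 * binding port 0x12b7 yields
 *
 *	sib_sid = (sid & mask) | 0x12b7 = 0x00000000010612b7
 *
 * and sib_sid_mask becomes all ones, since the ID is now fully specified.
 */
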
static int cma_alloc_port(enum rdma_ucm_port_space ps,
			  struct rdma_id_private *id_priv, unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	lockdep_assert_held(&lock);

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
			   snum);
	if (ret < 0)
		goto err;

	bind_list->ps = ps;
	bind_list->port = snum;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}

static int cma_port_is_unique(struct rdma_bind_list *bind_list,
			      struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *daddr = cma_dst_addr(id_priv);
	struct sockaddr *saddr = cma_src_addr(id_priv);
	__be16 dport = cma_port(daddr);

	lockdep_assert_held(&lock);

	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
		struct sockaddr *cur_saddr = cma_src_addr(cur_id);
		__be16 cur_dport = cma_port(cur_daddr);

		if (id_priv == cur_id)
			continue;

		/* different dest port -> unique */
		if (!cma_any_port(daddr) &&
		    !cma_any_port(cur_daddr) &&
		    (dport != cur_dport))
			continue;

		/* different src address -> unique */
		if (!cma_any_addr(saddr) &&
		    !cma_any_addr(cur_saddr) &&
		    cma_addr_cmp(saddr, cur_saddr))
			continue;

		/* different dst address -> unique */
		if (!cma_any_addr(daddr) &&
		    !cma_any_addr(cur_daddr) &&
		    cma_addr_cmp(daddr, cur_daddr))
			continue;

		return -EADDRNOTAVAIL;
	}
	return 0;
}

static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
			      struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	lockdep_assert_held(&lock);

	inet_get_local_port_range(net, &low, &high);
	remaining = (high - low) + 1;
	rover = get_random_u32_inclusive(low, remaining + low - 1);
retry:
	if (last_used_port != rover) {
		struct rdma_bind_list *bind_list;
		int ret;

		bind_list = cma_ps_find(net, ps, (unsigned short)rover);

		if (!bind_list) {
			ret = cma_alloc_port(ps, id_priv, rover);
		} else {
			ret = cma_port_is_unique(bind_list, id_priv);
			if (!ret)
				cma_bind_port(bind_list, id_priv);
		}
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}

/*
 * Check that the requested port is available.  This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port.  In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	lockdep_assert_held(&lock);

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		if (reuseaddr && cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}

static int cma_use_port(enum rdma_ucm_port_space ps,
			struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	lockdep_assert_held(&lock);

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
	if (!bind_list) {
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}

static enum rdma_ucm_port_space
cma_select_inet_ps(struct rdma_id_private *id_priv)
{
	switch (id_priv->id.ps) {
	case RDMA_PS_TCP:
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
	case RDMA_PS_IB:
		return id_priv->id.ps;
	default:
		return 0;
	}
}

static enum rdma_ucm_port_space
cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	enum rdma_ucm_port_space ps = 0;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = RDMA_PS_IB;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = RDMA_PS_TCP;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = RDMA_PS_UDP;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	enum rdma_ucm_port_space ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;

	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	if (!sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
		struct sockaddr_in any_in = {
			.sin_family = AF_INET,
			.sin_addr.s_addr = htonl(INADDR_ANY),
		};

		/* For a well behaved ULP state will be RDMA_CM_IDLE */
		ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
		if (ret)
			return ret;
		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
					   RDMA_CM_LISTEN)))
			return -EINVAL;
	}

	/*
	 * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable
	 * any more, and has to be unique in the bind list.
	 */
	if (id_priv->reuseaddr) {
		mutex_lock(&lock);
		ret = cma_check_port(id_priv->bind_list, id_priv, 0);
		if (!ret)
			id_priv->reuseaddr = 0;
		mutex_unlock(&lock);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id->device, 1)) {
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
		} else if (rdma_cap_iw_cm(id->device, 1)) {
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
		} else {
			ret = -ENOSYS;
			goto err;
		}
	} else {
		ret = cma_listen_on_all(id_priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	id_priv->backlog = 0;
	/*
	 * All the failure paths that lead here will not allow the req_handler's
	 * to have run.
	 */
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);

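/*
 * Editor's sketch (not in the original source): a minimal passive-side ULP,
 * names hypothetical and error handling elided:
 *
 *	struct sockaddr_in sin = { .sin_family = AF_INET,
 *				   .sin_port = htons(7174) };
 *	struct rdma_cm_id *listener =
 *		rdma_create_id(&init_net, my_handler, NULL, RDMA_PS_TCP,
 *			       IB_QPT_RC);
 *
 *	rdma_bind_addr(listener, (struct sockaddr *)&sin);
 *	rdma_listen(listener, 16);
 *	// my_handler() then sees RDMA_CM_EVENT_CONNECT_REQUEST events,
 *	// each carrying a new child rdma_cm_id to rdma_accept() or reject.
 */
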
static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
			      struct sockaddr *addr, const struct sockaddr *daddr)
{
	struct sockaddr *id_daddr;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev_by_src_ip(id_priv);
		if (ret)
			goto err1;
	}

	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6) {
			struct net *net = id_priv->id.route.addr.dev_addr.net;

			id_priv->afonly = net->ipv6.sysctl.bindv6only;
		}
#endif
	}
	id_daddr = cma_dst_addr(id_priv);
	if (daddr != id_daddr)
		memcpy(id_daddr, daddr, rdma_addr_size(addr));
	id_daddr->sa_family = addr->sa_family;

	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	if (!cma_any_addr(addr))
		rdma_restrack_add(&id_priv->res);
	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 const struct sockaddr *dst_addr)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	struct sockaddr_storage zero_sock = {};

	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);

	/*
	 * When the src_addr is not specified, automatically supply an any addr
	 */
	zero_sock.ss_family = dst_addr->sa_family;
	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
		struct sockaddr_in6 *src_addr6 =
			(struct sockaddr_in6 *)&zero_sock;
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *)dst_addr;

		src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
		if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
			id->route.addr.dev_addr.bound_dev_if =
				dst_addr6->sin6_scope_id;
	} else if (dst_addr->sa_family == AF_IB) {
		((struct sockaddr_ib *)&zero_sock)->sib_pkey =
			((struct sockaddr_ib *)dst_addr)->sib_pkey;
	}
	return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
}

/*
 * If required, resolve the source address for bind and leave the id_priv in
 * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
 * calls made by the ULP; a previously bound ID will not be re-bound and
 * src_addr is ignored.
 */
static int resolve_prepare_src(struct rdma_id_private *id_priv,
			       struct sockaddr *src_addr,
			       const struct sockaddr *dst_addr)
{
	int ret;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
		/* For a well behaved ULP state will be RDMA_CM_IDLE */
		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
		if (ret)
			return ret;
		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
					   RDMA_CM_ADDR_QUERY)))
			return -EINVAL;
	}

	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));

	if (cma_family(id_priv) != dst_addr->sa_family) {
		ret = -EINVAL;
		goto err_state;
	}
	return 0;

err_state:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	return ret;
}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
	if (ret)
		return ret;

	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			/*
			 * The FSM can return back to RDMA_CM_ADDR_BOUND after
			 * rdma_resolve_ip() is called, eg through the error
			 * path in addr_handler(). If this happens the existing
			 * request must be canceled before issuing a new one.
			 * Since canceling a request is a bit slow and this
			 * oddball path is rare, keep track once a request has
			 * been issued. The track turns out to be a permanent
			 * state since this is the only cancel as it is
			 * immediately before rdma_resolve_ip().
			 */
			if (id_priv->used_resolve_ip)
				rdma_addr_cancel(&id->route.addr.dev_addr);
			else
				id_priv->used_resolve_ip = 1;
			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
					      &id->route.addr.dev_addr,
					      timeout_ms, addr_handler,
					      false, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
}
EXPORT_SYMBOL(rdma_bind_addr);

static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
	struct cma_hdr *cma_hdr;

	cma_hdr = hdr;
	cma_hdr->cma_version = CMA_VERSION;
	if (cma_family(id_priv) == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
	} else if (cma_family(id_priv) == AF_INET6) {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 6);
		cma_hdr->src_addr.ip6 = src6->sin6_addr;
		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
		cma_hdr->port = src6->sin6_port;
	}
	return 0;
}

static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				const struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event = {};
	const struct ib_cm_sidr_rep_event_param *rep =
				&ib_event->param.sidr_rep_rcvd;
	int ret;

	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
		goto out;

	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
					     event.status);
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		ib_init_ah_attr_from_path(id_priv->id.device,
					  id_priv->id.port_num,
					  id_priv->id.route.path_rec,
					  &event.param.ud.ah_attr,
					  rep->sgid_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = cma_cm_event_handler(id_priv, &event);

	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		destroy_id_handler_unlock(id_priv);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id	*id;
	void *private_data;
	u8 offset;
	int ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	trace_cm_send_sidr_req(id_priv);
	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(private_data);
	return ret;
}

static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id	*id;
	u8 offset;
	int ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	req.primary_path_inbound = route->path_rec_inbound;
	req.primary_path_outbound = route->path_rec_outbound;
	if (route->num_pri_alt_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
	/* Alternate path SGID attribute currently unsupported */
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;
	req.ece.vendor_id = id_priv->ece.vendor_id;
	req.ece.attr_mod = id_priv->ece.attr_mod;

	trace_cm_send_req(id_priv);
	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	mutex_lock(&id_priv->qp_mutex);
	cm_id->tos = id_priv->tos;
	cm_id->tos_set = id_priv->tos_set;
	mutex_unlock(&id_priv->qp_mutex);

	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

/**
 * rdma_connect_locked - Initiate an active connection request.
 * @id: Connection identifier to connect.
 * @conn_param: Connection information used for connected QPs.
 *
 * Same as rdma_connect() but can only be called from the
 * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
 */
int rdma_connect_locked(struct rdma_cm_id *id,
			struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = cma_connect_iw(id_priv, conn_param);
	} else {
		ret = -ENOSYS;
	}
	if (ret)
		goto err_state;
	return 0;
err_state:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect_locked);
/**
 * rdma_connect - Initiate an active connection request.
 * @id: Connection identifier to connect.
 * @conn_param: Connection information used for connected QPs.
 *
 * Users must have resolved a route for the rdma_cm_id to connect with by having
 * called rdma_resolve_route before calling this routine.
 *
 * This call will either connect to a remote QP or obtain remote QP information
 * for unconnected rdma_cm_id's. The actual operation is based on the
 * rdma_cm_id's port space.
 */
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	mutex_lock(&id_priv->handler_mutex);
	ret = rdma_connect_locked(id, conn_param);
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
/**
 * rdma_connect_ece - Initiate an active connection request with ECE data.
 * @id: Connection identifier to connect.
 * @conn_param: Connection information used for connected QPs.
 * @ece: ECE parameters
 *
 * See rdma_connect() explanation.
 */
int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
		     struct rdma_ucm_ece *ece)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	id_priv->ece.vendor_id = ece->vendor_id;
	id_priv->ece.attr_mod = ece->attr_mod;

	return rdma_connect(id, conn_param);
}
EXPORT_SYMBOL(rdma_connect_ece);
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;
	rep.ece.vendor_id = id_priv->ece.vendor_id;
	rep.ece.attr_mod = id_priv->ece.attr_mod;

	trace_cm_send_rep(id_priv);
	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	if (!conn_param)
		return -EINVAL;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		if (qkey)
			ret = cma_set_qkey(id_priv, qkey);
		else
			ret = cma_set_default_qkey(id_priv);
		if (ret)
			return ret;

		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;

		rep.ece.vendor_id = id_priv->ece.vendor_id;
		rep.ece.attr_mod = id_priv->ece.attr_mod;
	}

	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	trace_cm_send_sidr_rep(id_priv);
	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
/**
 * rdma_accept - Called to accept a connection request or response.
 * @id: Connection identifier associated with the request.
 * @conn_param: Information needed to establish the connection. This must be
 *   provided if accepting a connection request. If accepting a connection
 *   response, this parameter must be NULL.
 *
 * Typically, this routine is only called by the listener to accept a connection
 * request. It must also be called on the active side of a connection if the
 * user is performing their own QP transitions.
 *
 * In the case of error, a reject message is sent to the remote side and the
 * state of the qp associated with the id is modified to error, such that any
 * previously posted receive buffers would be flushed.
 *
 * This function is for use by kernel ULPs and must be called from under the
 * handler callback.
 */
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	lockdep_assert_held(&id_priv->handler_mutex);

	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = cma_accept_iw(id_priv, conn_param);
	} else {
		ret = -ENOSYS;
	}
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
		    struct rdma_ucm_ece *ece)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	id_priv->ece.vendor_id = ece->vendor_id;
	id_priv->ece.attr_mod = ece->attr_mod;

	return rdma_accept(id, conn_param);
}
EXPORT_SYMBOL(rdma_accept_ece);
void rdma_lock_handler(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	mutex_lock(&id_priv->handler_mutex);
}
EXPORT_SYMBOL(rdma_lock_handler);

void rdma_unlock_handler(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	mutex_unlock(&id_priv->handler_mutex);
}
EXPORT_SYMBOL(rdma_unlock_handler);
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len, u8 reason)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		} else {
			trace_cm_send_rej(id_priv);
			ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
					     private_data, private_data_len);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else {
		ret = -ENOSYS;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		trace_cm_disconnect(id_priv);
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
			if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
				trace_cm_sent_drep(id_priv);
		} else {
			trace_cm_sent_dreq(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;

out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
			      struct ib_sa_multicast *multicast,
			      struct rdma_cm_event *event,
			      struct cma_multicast *mc)
{
	struct rdma_dev_addr *dev_addr;
	enum ib_gid_type gid_type;
	struct net_device *ndev;

	if (status)
		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
				     status);

	event->status = status;
	event->param.ud.private_data = mc->context;
	if (status) {
		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
		return;
	}

	dev_addr = &id_priv->id.route.addr.dev_addr;
	ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
	gid_type =
		id_priv->cma_dev
			->default_gid_type[id_priv->id.port_num -
					   rdma_start_port(
						   id_priv->cma_dev->device)];

	event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
	if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
				     &multicast->rec, ndev, gid_type,
				     &event->param.ud.ah_attr)) {
		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
		goto out;
	}

	event->param.ud.qp_num = 0xFFFFFF;
	event->param.ud.qkey = id_priv->qkey;

out:
	if (ndev)
		dev_put(ndev);
}
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct cma_multicast *mc = multicast->context;
	struct rdma_id_private *id_priv = mc->id_priv;
	struct rdma_cm_event event = {};
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
		goto out;

	ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	if (!ret) {
		cma_make_mc_event(status, id_priv, multicast, &event, mc);
		ret = cma_cm_event_handler(id_priv, &event);
	}
	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
	WARN_ON(ret);

out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	if (!id_priv->qkey) {
		ret = cma_set_default_qkey(id_priv);
		if (ret)
			return ret;
	}

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = mc->join_state;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
					 id_priv->id.port_num, &rec, comp_mask,
					 GFP_KERNEL, cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->sa_mc);
}
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] =
			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
		mgid->raw[1] =
			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;
	struct ib_sa_multicast ib = {};
	enum ib_gid_type gid_type;
	bool send_only;

	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (cma_zero_addr(addr))
		return -EINVAL;

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);

	ib.rec.pkey = cpu_to_be16(0xffff);
	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
	if (!ndev)
		return -ENODEV;

	ib.rec.rate = IB_RATE_PORT_CURRENT;
	ib.rec.hop_limit = 1;
	ib.rec.mtu = iboe_get_mtu(ndev->mtu);

	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			if (!send_only) {
				err = cma_igmp_send(ndev, &ib.rec.mgid,
						    true);
			}
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !ib.rec.mtu)
		return err ?: -EINVAL;

	if (!id_priv->qkey)
		cma_set_default_qkey(id_priv);

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &ib.rec.port_gid);
	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
	cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
	queue_work(cma_wq, &mc->iboe_join.work);
	return 0;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	struct cma_multicast *mc;
	int ret;

	/* Not supported for kernel QPs */
	if (WARN_ON(id->qp))
		return -EINVAL;

	/* ULP is calling this wrong. */
	if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
			    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	if (id_priv->id.qp_type != IB_QPT_UD)
		return -EINVAL;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->join_state = join_state;

	if (rdma_protocol_roce(id->device, id->port_num)) {
		ret = cma_iboe_join_multicast(id_priv, mc);
		if (ret)
			goto out_err;
	} else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
		ret = cma_join_ib_multicast(id_priv, mc);
		if (ret)
			goto out_err;
	} else {
		ret = -ENOSYS;
		goto out_err;
	}

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	return 0;
out_err:
	kfree(mc);
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
			continue;
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);

		WARN_ON(id_priv->cma_dev->device != id->device);
		destroy_mc(id_priv, mc);
		return;
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		cma_id_get(id_priv);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!netif_is_bond_master(ndev))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, device_item) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}
static void cma_netevent_work_handler(struct work_struct *_work)
{
	struct rdma_id_private *id_priv =
		container_of(_work, struct rdma_id_private, id.net_work);
	struct rdma_cm_event event = {};

	mutex_lock(&id_priv->handler_mutex);

	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
		goto out_unlock;

	event.event = RDMA_CM_EVENT_UNREACHABLE;
	event.status = -ETIMEDOUT;

	if (cma_cm_event_handler(id_priv, &event)) {
		__acquire(&id_priv->handler_mutex);
		id_priv->cm_id.ib = NULL;
		cma_id_put(id_priv);
		destroy_id_handler_unlock(id_priv);
		return;
	}

out_unlock:
	mutex_unlock(&id_priv->handler_mutex);
	cma_id_put(id_priv);
}
static int cma_netevent_callback(struct notifier_block *self,
				 unsigned long event, void *ctx)
{
	struct id_table_entry *ips_node = NULL;
	struct rdma_id_private *current_id;
	struct neighbour *neigh = ctx;
	unsigned long flags;

	if (event != NETEVENT_NEIGH_UPDATE)
		return NOTIFY_DONE;

	spin_lock_irqsave(&id_table_lock, flags);
	if (neigh->tbl->family == AF_INET6) {
		struct sockaddr_in6 neigh_sock_6;

		neigh_sock_6.sin6_family = AF_INET6;
		neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key;
		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
					     (struct sockaddr *)&neigh_sock_6);
	} else if (neigh->tbl->family == AF_INET) {
		struct sockaddr_in neigh_sock_4;

		neigh_sock_4.sin_family = AF_INET;
		neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key);
		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
					     (struct sockaddr *)&neigh_sock_4);
	} else
		goto out;

	if (!ips_node)
		goto out;

	list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) {
		if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
			   neigh->ha, ETH_ALEN))
			continue;
		INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
		cma_id_get(current_id);
		queue_work(cma_wq, &current_id->id.net_work);
	}
out:
	spin_unlock_irqrestore(&id_table_lock, flags);
	return NOTIFY_DONE;
}
static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static struct notifier_block cma_netevent_cb = {
	.notifier_call = cma_netevent_callback
};
static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
	enum rdma_cm_state state;
	unsigned long flags;

	mutex_lock(&id_priv->handler_mutex);
	/* Record that we want to remove the device */
	spin_lock_irqsave(&id_priv->lock, flags);
	state = id_priv->state;
	if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
		spin_unlock_irqrestore(&id_priv->lock, flags);
		mutex_unlock(&id_priv->handler_mutex);
		cma_id_put(id_priv);
		return;
	}
	id_priv->state = RDMA_CM_DEVICE_REMOVAL;
	spin_unlock_irqrestore(&id_priv->lock, flags);

	if (cma_cm_event_handler(id_priv, &event)) {
		/*
		 * At this point the ULP promises it won't call
		 * rdma_destroy_id() concurrently
		 */
		cma_id_put(id_priv);
		mutex_unlock(&id_priv->handler_mutex);
		trace_cm_id_destroy(id_priv);
		_destroy_id(id_priv, state);
		return;
	}
	mutex_unlock(&id_priv->handler_mutex);

	/*
	 * If this races with destroy then the thread that first assigns state
	 * to a destroying does the cancel.
	 */
	cma_cancel_operation(id_priv, state);
	cma_id_put(id_priv);
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		struct rdma_id_private *id_priv = list_first_entry(
			&cma_dev->id_list, struct rdma_id_private, device_item);

		list_del_init(&id_priv->listen_item);
		list_del_init(&id_priv->device_item);
		cma_id_get(id_priv);
		mutex_unlock(&lock);

		cma_send_device_removal_put(id_priv);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_dev_put(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static bool cma_supported(struct ib_device *device)
{
	u32 i;

	rdma_for_each_port(device, i) {
		if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i))
			return true;
	}
	return false;
}
static int cma_add_one(struct ib_device *device)
{
	struct rdma_id_private *to_destroy;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned long supported_gids = 0;
	int ret;
	u32 i;

	if (!cma_supported(device))
		return -EOPNOTSUPP;

	cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
	if (!cma_dev)
		return -ENOMEM;

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type) {
		ret = -ENOMEM;
		goto free_cma_dev;
	}

	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_roce_tos),
					    GFP_KERNEL);
	if (!cma_dev->default_roce_tos) {
		ret = -ENOMEM;
		goto free_gid_type;
	}

	rdma_for_each_port (device, i) {
		supported_gids = roce_gid_type_mask_support(device, i);
		WARN_ON(!supported_gids);
		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
			cma_dev->default_gid_type[i - rdma_start_port(device)] =
				CMA_PREFERRED_ROCE_GID_TYPE;
		else
			cma_dev->default_gid_type[i - rdma_start_port(device)] =
				find_first_bit(&supported_gids, BITS_PER_LONG);
		cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
	}

	init_completion(&cma_dev->comp);
	refcount_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, listen_any_item) {
		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
		if (ret)
			goto free_listen;
	}
	mutex_unlock(&lock);

	trace_cm_add_one(device);
	return 0;

free_listen:
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	/* cma_process_remove() will delete to_destroy */
	cma_process_remove(cma_dev);
	kfree(cma_dev->default_roce_tos);
free_gid_type:
	kfree(cma_dev->default_gid_type);

free_cma_dev:
	kfree(cma_dev);
	return ret;
}
static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	trace_cm_remove_one(device);

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev->default_roce_tos);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}
static int cma_init_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	xa_init(&pernet->tcp_ps);
	xa_init(&pernet->udp_ps);
	xa_init(&pernet->ipoib_ps);
	xa_init(&pernet->ib_ps);

	return 0;
}

static void cma_exit_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	WARN_ON(!xa_empty(&pernet->tcp_ps));
	WARN_ON(!xa_empty(&pernet->udp_ps));
	WARN_ON(!xa_empty(&pernet->ipoib_ps));
	WARN_ON(!xa_empty(&pernet->ib_ps));
}
static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};
static int __init cma_init(void)
{
	int ret;

	/*
	 * There is a rare lock ordering dependency in cma_netdev_callback()
	 * that only happens when bonding is enabled. Teach lockdep that rtnl
	 * must never be nested under lock so it can find these without having
	 * to test with bonding.
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		rtnl_lock();
		mutex_lock(&lock);
		mutex_unlock(&lock);
		rtnl_unlock();
	}

	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	register_netdevice_notifier(&cma_nb);
	register_netevent_notifier(&cma_netevent_cb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	ret = cma_configfs_init();
	if (ret)
		goto err_ib;

	return 0;

err_ib:
	ib_unregister_client(&cma_client);
err:
	unregister_netevent_notifier(&cma_netevent_cb);
	unregister_netdevice_notifier(&cma_nb);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}
static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ib_unregister_client(&cma_client);
	unregister_netevent_notifier(&cma_netevent_cb);
	unregister_netdevice_notifier(&cma_nb);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}

module_init(cma_init);
module_exit(cma_cleanup);