/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>

#include "core_priv.h"
struct addr_req {
	struct list_head list;
	struct sockaddr_storage src_addr;
	struct sockaddr_storage dst_addr;
	struct rdma_dev_addr *addr;
	void (*callback)(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *addr, void *context);
	void *context;
	unsigned long timeout;
	struct delayed_work work;
	int status;
	u32 seq;
};
static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(lock);
static LIST_HEAD(req_list);
static struct workqueue_struct *addr_wq;
static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
		.len = sizeof(struct rdma_nla_ls_gid)},
};
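
/*
 * A netlink response is considered usable only if it does not carry the
 * RDMA_NL_LS_F_ERR flag and its attributes validate against
 * ib_nl_addr_policy above.
 */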
static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return false;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_addr_policy, NULL);
	if (ret)
		return false;

	return true;
}
static void ib_nl_process_good_ip_resp(const struct nlmsghdr *nlh)
{
	const struct nlattr *head, *curr;
	union ib_gid gid;
	struct addr_req *req;
	int len, rem;
	int found = 0;

	head = (const struct nlattr *)nlmsg_data(nlh);
	len = nlmsg_len(nlh);

	nla_for_each_attr(curr, head, len, rem) {
		if (curr->nla_type == LS_NLA_TYPE_DGID)
			memcpy(&gid, nla_data(curr), nla_len(curr));
	}

	spin_lock_bh(&lock);
	list_for_each_entry(req, &req_list, list) {
		if (nlh->nlmsg_seq != req->seq)
			continue;
		/* We set the DGID part, the rest was set earlier */
		rdma_addr_set_dgid(req->addr, &gid);
		req->status = 0;
		found = 1;
		break;
	}
	spin_unlock_bh(&lock);

	if (!found)
		pr_info("Couldn't find request waiting for DGID: %pI6\n",
			&gid);
}
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	if (ib_nl_is_good_ip_resp(nlh))
		ib_nl_process_good_ip_resp(nlh);

	return skb->len;
}
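
/*
 * Build an RDMA_NL_LS_OP_IP_RESOLVE request and multicast it to the
 * RDMA_NL_GROUP_LS listeners: a family header carrying the bound
 * ifindex, followed by a single IPv4 or IPv6 destination attribute.
 */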
static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
			     const void *daddr,
			     u32 seq, u16 family)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	struct rdma_ls_ip_resolve_header *header;
	void *data;
	size_t size;
	int attrtype;
	int len;

	if (family == AF_INET) {
		size = sizeof(struct in_addr);
		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
	} else {
		size = sizeof(struct in6_addr);
		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
	}

	len = nla_total_size(sizeof(size));
	len += NLMSG_ALIGN(sizeof(*header));

	skb = nlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -ENODATA;
	}

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	header->ifindex = dev_addr->bound_dev_if;
	nla_put(skb, attrtype, size, daddr);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);
	rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, GFP_KERNEL);

	/* Make the request retry, so that when the response arrives from
	 * userspace there is still an outstanding request to complete.
	 */
	return -ENODATA;
}
int rdma_addr_size(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
	case AF_IB:
		return sizeof(struct sockaddr_ib);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(rdma_addr_size);
int rdma_addr_size_in6(struct sockaddr_in6 *addr)
{
	int ret = rdma_addr_size((struct sockaddr *) addr);

	return ret <= sizeof(*addr) ? ret : 0;
}
EXPORT_SYMBOL(rdma_addr_size_in6);
int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
{
	int ret = rdma_addr_size((struct sockaddr *) addr);

	return ret <= sizeof(*addr) ? ret : 0;
}
EXPORT_SYMBOL(rdma_addr_size_kss);
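
/*
 * Illustrative usage (hypothetical caller, not part of this file):
 * code that copies a sockaddr in from user space can use these helpers
 * to size and validate the copy, e.g.
 *
 *	if (!rdma_addr_size_in6(&cmd.addr))
 *		return -EINVAL;
 *
 * where a zero return means the address family is unknown or the
 * address does not fit in the supplied storage.
 */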
void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
		    const struct net_device *dev,
		    const unsigned char *dst_dev_addr)
{
	dev_addr->dev_type = dev->type;
	memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
	memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
	if (dst_dev_addr)
		memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
	dev_addr->bound_dev_if = dev->ifindex;
}
EXPORT_SYMBOL(rdma_copy_addr);
int rdma_translate_ip(const struct sockaddr *addr,
		      struct rdma_dev_addr *dev_addr)
{
	struct net_device *dev;

	if (dev_addr->bound_dev_if) {
		dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
		if (!dev)
			return -ENODEV;
		rdma_copy_addr(dev_addr, dev, NULL);
		dev_put(dev);
		return 0;
	}

	switch (addr->sa_family) {
	case AF_INET:
		dev = ip_dev_find(dev_addr->net,
			((const struct sockaddr_in *)addr)->sin_addr.s_addr);

		if (!dev)
			return -EADDRNOTAVAIL;

		rdma_copy_addr(dev_addr, dev, NULL);
		dev_put(dev);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		rcu_read_lock();
		for_each_netdev_rcu(dev_addr->net, dev) {
			if (ipv6_chk_addr(dev_addr->net,
					  &((const struct sockaddr_in6 *)addr)->sin6_addr,
					  dev, 1)) {
				rdma_copy_addr(dev_addr, dev, NULL);
				break;
			}
		}
		rcu_read_unlock();
		break;
#endif
	}
	return 0;
}
EXPORT_SYMBOL(rdma_translate_ip);
static void set_timeout(struct addr_req *req, unsigned long time)
{
	unsigned long delay;

	delay = time - jiffies;
	if ((long)delay < 0)
		delay = 0;

	mod_delayed_work(addr_wq, &req->work, delay);
}
static void queue_req(struct addr_req *req)
{
	spin_lock_bh(&lock);
	list_add_tail(&req->list, &req_list);
	set_timeout(req, req->timeout);
	spin_unlock_bh(&lock);
}
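
/*
 * Pending requests live on req_list under 'lock'; queue_req() adds a
 * request and arms its delayed work on addr_wq, which is an ordered
 * workqueue, so requests are processed one at a time.
 */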
static int ib_nl_fetch_ha(const struct dst_entry *dst,
			  struct rdma_dev_addr *dev_addr,
			  const void *daddr, u32 seq, u16 family)
{
	if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
		return -EADDRNOTAVAIL;

	/* We fill in what we can, the response will fill the rest */
	rdma_copy_addr(dev_addr, dst->dev, NULL);
	return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
}
static int dst_fetch_ha(const struct dst_entry *dst,
			struct rdma_dev_addr *dev_addr,
			const void *daddr)
{
	struct neighbour *n;
	int ret = 0;

	n = dst_neigh_lookup(dst, daddr);
	if (!n)
		return -ENODATA;

	if (!(n->nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		ret = -ENODATA;
	} else {
		rdma_copy_addr(dev_addr, dst->dev, n->ha);
	}

	neigh_release(n);

	return ret;
}
static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
{
	struct rtable *rt;
	struct rt6_info *rt6;

	if (family == AF_INET) {
		rt = container_of(dst, struct rtable, dst);
		return rt->rt_uses_gateway;
	}

	rt6 = container_of(dst, struct rt6_info, dst);
	return rt6->rt6i_flags & RTF_GATEWAY;
}
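
/*
 * A destination that sits behind a gateway on an InfiniBand link must
 * be resolved through an IB router via the netlink path; anything else
 * can be resolved from the kernel neighbour table directly, as the
 * dispatch in fetch_ha() below shows.
 */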
static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
		    const struct sockaddr *dst_in, u32 seq)
{
	const struct sockaddr_in *dst_in4 =
		(const struct sockaddr_in *)dst_in;
	const struct sockaddr_in6 *dst_in6 =
		(const struct sockaddr_in6 *)dst_in;
	const void *daddr = (dst_in->sa_family == AF_INET) ?
		(const void *)&dst_in4->sin_addr.s_addr :
		(const void *)&dst_in6->sin6_addr;
	sa_family_t family = dst_in->sa_family;

	/* Gateway + ARPHRD_INFINIBAND -> IB router */
	if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND)
		return ib_nl_fetch_ha(dst, dev_addr, daddr, seq, family);
	else
		return dst_fetch_ha(dst, dev_addr, daddr);
}
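
/*
 * Resolve the IPv4 route toward the destination, filling in the chosen
 * source address, the network type and the hop limit on the
 * rdma_dev_addr, and handing the route back to the caller.
 */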
static int addr4_resolve(struct sockaddr_in *src_in,
			 const struct sockaddr_in *dst_in,
			 struct rdma_dev_addr *addr,
			 struct rtable **prt)
{
	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct rtable *rt;
	struct flowi4 fl4;
	int ret;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst_ip;
	fl4.saddr = src_ip;
	fl4.flowi4_oif = addr->bound_dev_if;
	rt = ip_route_output_key(addr->net, &fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;

	src_in->sin_family = AF_INET;
	src_in->sin_addr.s_addr = fl4.saddr;

	/* If there's a gateway and the device type is not ARPHRD_INFINIBAND,
	 * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the
	 * network type accordingly.
	 */
	if (rt->rt_uses_gateway && rt->dst.dev->type != ARPHRD_INFINIBAND)
		addr->network = RDMA_NETWORK_IPV4;

	addr->hoplimit = ip4_dst_hoplimit(&rt->dst);

	*prt = rt;
	return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
static int addr6_resolve(struct sockaddr_in6 *src_in,
			 const struct sockaddr_in6 *dst_in,
			 struct rdma_dev_addr *addr,
			 struct dst_entry **pdst)
{
	struct flowi6 fl6;
	struct dst_entry *dst;
	struct rt6_info *rt;

	memset(&fl6, 0, sizeof fl6);
	fl6.daddr = dst_in->sin6_addr;
	fl6.saddr = src_in->sin6_addr;
	fl6.flowi6_oif = addr->bound_dev_if;

	dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	rt = (struct rt6_info *)dst;
	if (ipv6_addr_any(&src_in->sin6_addr)) {
		src_in->sin6_family = AF_INET6;
		src_in->sin6_addr = fl6.saddr;
	}

	/* If there's a gateway and the device type is not ARPHRD_INFINIBAND,
	 * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the
	 * network type accordingly.
	 */
	if (rt->rt6i_flags & RTF_GATEWAY &&
	    ip6_dst_idev(dst)->dev->type != ARPHRD_INFINIBAND)
		addr->network = RDMA_NETWORK_IPV6;

	addr->hoplimit = ip6_dst_hoplimit(dst);

	*pdst = dst;
	return 0;
}
#else
static int addr6_resolve(struct sockaddr_in6 *src_in,
			 const struct sockaddr_in6 *dst_in,
			 struct rdma_dev_addr *addr,
			 struct dst_entry **pdst)
{
	return -EADDRNOTAVAIL;
}
#endif
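
/*
 * Resolve the link-layer destination: loopback devices copy the local
 * hardware address, ARP-capable devices go through the neighbour table
 * (or the IB-router netlink path), and NOARP devices need no lookup.
 */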
static int addr_resolve_neigh(const struct dst_entry *dst,
			      const struct sockaddr *dst_in,
			      struct rdma_dev_addr *addr,
			      u32 seq)
{
	if (dst->dev->flags & IFF_LOOPBACK) {
		int ret;

		ret = rdma_translate_ip(dst_in, addr);
		if (!ret)
			memcpy(addr->dst_dev_addr, addr->src_dev_addr,
			       MAX_ADDR_LEN);

		return ret;
	}

	/* If the device doesn't do ARP internally */
	if (!(dst->dev->flags & IFF_NOARP))
		return fetch_ha(dst, addr, dst_in, seq);

	rdma_copy_addr(addr, dst->dev, NULL);

	return 0;
}
static int addr_resolve(struct sockaddr *src_in,
			const struct sockaddr *dst_in,
			struct rdma_dev_addr *addr,
			bool resolve_neigh,
			u32 seq)
{
	struct net_device *ndev;
	struct dst_entry *dst;
	int ret;

	if (!addr->net) {
		pr_warn_ratelimited("%s: missing namespace\n", __func__);
		return -EINVAL;
	}

	if (src_in->sa_family == AF_INET) {
		struct rtable *rt = NULL;
		const struct sockaddr_in *dst_in4 =
			(const struct sockaddr_in *)dst_in;

		ret = addr4_resolve((struct sockaddr_in *)src_in,
				    dst_in4, addr, &rt);
		if (ret)
			return ret;

		if (resolve_neigh)
			ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);

		if (addr->bound_dev_if) {
			ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
		} else {
			ndev = rt->dst.dev;
			dev_hold(ndev);
		}

		ip_rt_put(rt);
	} else {
		const struct sockaddr_in6 *dst_in6 =
			(const struct sockaddr_in6 *)dst_in;

		ret = addr6_resolve((struct sockaddr_in6 *)src_in,
				    dst_in6, addr, &dst);
		if (ret)
			return ret;

		if (resolve_neigh)
			ret = addr_resolve_neigh(dst, dst_in, addr, seq);

		if (addr->bound_dev_if) {
			ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
		} else {
			ndev = dst->dev;
			dev_hold(ndev);
		}

		dst_release(dst);
	}

	if (!ndev)
		return -ENODEV;

	if (ndev->flags & IFF_LOOPBACK)
		ret = rdma_translate_ip(dst_in, addr);
	else
		addr->bound_dev_if = ndev->ifindex;
	dev_put(ndev);

	return ret;
}
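
/*
 * Workqueue handler for a queued request: retry resolution while it
 * still returns -ENODATA, time it out once its deadline passes, and
 * otherwise deliver the result through the caller's callback exactly
 * once.
 */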
static void process_one_req(struct work_struct *_work)
{
	struct addr_req *req;
	struct sockaddr *src_in, *dst_in;

	req = container_of(_work, struct addr_req, work.work);

	if (req->status == -ENODATA) {
		src_in = (struct sockaddr *)&req->src_addr;
		dst_in = (struct sockaddr *)&req->dst_addr;
		req->status = addr_resolve(src_in, dst_in, req->addr,
					   true, req->seq);
		if (req->status && time_after_eq(jiffies, req->timeout)) {
			req->status = -ETIMEDOUT;
		} else if (req->status == -ENODATA) {
			/* requeue the work for retrying again */
			spin_lock_bh(&lock);
			if (!list_empty(&req->list))
				set_timeout(req, req->timeout);
			spin_unlock_bh(&lock);
			return;
		}
	}

	req->callback(req->status, (struct sockaddr *)&req->src_addr,
		req->addr, req->context);
	req->callback = NULL;

	spin_lock_bh(&lock);
	if (!list_empty(&req->list)) {
		/*
		 * Although the work will normally have been canceled by the
		 * workqueue, it can still be requeued as long as it is on
		 * the req_list.
		 */
		cancel_delayed_work(&req->work);
		list_del_init(&req->list);
		kfree(req);
	}
	spin_unlock_bh(&lock);
}
int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
		    struct rdma_dev_addr *addr, int timeout_ms,
		    void (*callback)(int status, struct sockaddr *src_addr,
				     struct rdma_dev_addr *addr, void *context),
		    void *context)
{
	struct sockaddr *src_in, *dst_in;
	struct addr_req *req;
	int ret = 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	src_in = (struct sockaddr *) &req->src_addr;
	dst_in = (struct sockaddr *) &req->dst_addr;

	if (src_addr) {
		if (src_addr->sa_family != dst_addr->sa_family) {
			ret = -EINVAL;
			goto err;
		}

		memcpy(src_in, src_addr, rdma_addr_size(src_addr));
	} else {
		src_in->sa_family = dst_addr->sa_family;
	}

	memcpy(dst_in, dst_addr, rdma_addr_size(dst_addr));
	req->addr = addr;
	req->callback = callback;
	req->context = context;
	INIT_DELAYED_WORK(&req->work, process_one_req);
	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);

	req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
	switch (req->status) {
	case 0:
		req->timeout = jiffies;
		queue_req(req);
		break;
	case -ENODATA:
		req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
		queue_req(req);
		break;
	default:
		ret = req->status;
		goto err;
	}
	return ret;
err:
	kfree(req);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);
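
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * resolution completes asynchronously through the callback, so a caller
 * that needs to block can pair it with a completion, much as
 * rdma_addr_find_l2_eth_by_grh() does further below:
 *
 *	static void my_cb(int status, struct sockaddr *src,
 *			  struct rdma_dev_addr *addr, void *context)
 *	{
 *		complete((struct completion *)context);
 *	}
 *
 *	struct completion done;
 *
 *	init_completion(&done);
 *	ret = rdma_resolve_ip(src, dst, &dev_addr, 1000, my_cb, &done);
 *	if (!ret)
 *		wait_for_completion(&done);
 */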
int rdma_resolve_ip_route(struct sockaddr *src_addr,
			  const struct sockaddr *dst_addr,
			  struct rdma_dev_addr *addr)
{
	struct sockaddr_storage ssrc_addr = {};
	struct sockaddr *src_in = (struct sockaddr *)&ssrc_addr;

	if (src_addr) {
		if (src_addr->sa_family != dst_addr->sa_family)
			return -EINVAL;

		memcpy(src_in, src_addr, rdma_addr_size(src_addr));
	} else {
		src_in->sa_family = dst_addr->sa_family;
	}

	return addr_resolve(src_in, dst_addr, addr, false, 0);
}
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
	struct addr_req *req, *temp_req;
	struct addr_req *found = NULL;

	spin_lock_bh(&lock);
	list_for_each_entry_safe(req, temp_req, &req_list, list) {
		if (req->addr == addr) {
			/*
			 * Removing from the list means we take ownership of
			 * the req
			 */
			list_del_init(&req->list);
			found = req;
			break;
		}
	}
	spin_unlock_bh(&lock);

	if (!found)
		return;

	/*
	 * sync canceling the work after removing it from the req_list
	 * guarantees no work is running and none will be started.
	 */
	cancel_delayed_work_sync(&found->work);

	found->callback(-ECANCELED, (struct sockaddr *)&found->src_addr,
			found->addr, found->context);
	kfree(found);
}
EXPORT_SYMBOL(rdma_addr_cancel);
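
/*
 * Small helper pair used to turn the asynchronous rdma_resolve_ip()
 * into a synchronous call for rdma_addr_find_l2_eth_by_grh().
 */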
struct resolve_cb_context {
	struct completion comp;
	int status;
};

static void resolve_cb(int status, struct sockaddr *src_addr,
		       struct rdma_dev_addr *addr, void *context)
{
	((struct resolve_cb_context *)context)->status = status;
	complete(&((struct resolve_cb_context *)context)->comp);
}
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
				 const union ib_gid *dgid,
				 u8 *dmac, const struct net_device *ndev,
				 int *hoplimit)
{
	struct rdma_dev_addr dev_addr;
	struct resolve_cb_context ctx;
	union {
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;
	int ret;

	rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
	rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);

	memset(&dev_addr, 0, sizeof(dev_addr));
	dev_addr.bound_dev_if = ndev->ifindex;
	dev_addr.net = &init_net;

	init_completion(&ctx.comp);
	ret = rdma_resolve_ip((struct sockaddr *)&sgid_addr,
			      (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
			      resolve_cb, &ctx);
	if (ret)
		return ret;

	wait_for_completion(&ctx.comp);

	ret = ctx.status;
	if (ret)
		return ret;

	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
	*hoplimit = dev_addr.hoplimit;
	return ret;
}
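
/*
 * When the kernel learns a new valid neighbour entry, rearm every
 * pending request immediately so it retries without waiting for its
 * timeout.
 */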
static int netevent_callback(struct notifier_block *self, unsigned long event,
	void *ctx)
{
	struct addr_req *req;

	if (event == NETEVENT_NEIGH_UPDATE) {
		struct neighbour *neigh = ctx;

		if (neigh->nud_state & NUD_VALID) {
			spin_lock_bh(&lock);
			list_for_each_entry(req, &req_list, list)
				set_timeout(req, jiffies);
			spin_unlock_bh(&lock);
		}
	}
	return 0;
}
static struct notifier_block nb = {
	.notifier_call = netevent_callback
};

int addr_init(void)
{
	addr_wq = alloc_ordered_workqueue("ib_addr", 0);
	if (!addr_wq)
		return -ENOMEM;

	register_netevent_notifier(&nb);

	return 0;
}
void addr_cleanup(void)
{
	unregister_netevent_notifier(&nb);
	destroy_workqueue(addr_wq);
	WARN_ON(!list_empty(&req_list));
}