/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
#include <net/ipv6_stubs.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>

#include "core_priv.h"
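
/*
 * One outstanding address resolution request. Requests live on the global
 * req_list and are retried from a delayed work item until they complete,
 * time out, or are canceled.
 */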
struct addr_req {
	struct list_head list;
	struct sockaddr_storage src_addr;
	struct sockaddr_storage dst_addr;
	struct rdma_dev_addr *addr;
	void *context;
	void (*callback)(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *addr, void *context);
	unsigned long timeout;
	struct delayed_work work;
	bool resolve_by_gid_attr;	/* Consider gid attr in resolve phase */
	int status;
	u32 seq;
};
static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(lock);
static LIST_HEAD(req_list);
static struct workqueue_struct *addr_wq;
static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
		.len = sizeof(struct rdma_nla_ls_gid),
		.validation_type = NLA_VALIDATE_MIN,
		.min = sizeof(struct rdma_nla_ls_gid)},
};
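
/* A response is usable only if it carries no error and parses cleanly. */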
static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return false;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_addr_policy, NULL);
	if (ret)
		return false;

	return true;
}
static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
{
	const struct nlattr *head, *curr;
	union ib_gid gid;
	struct addr_req *req;
	int len, rem;
	int found = 0;

	head = (const struct nlattr *)nlmsg_data(nlh);
	len = nlmsg_len(nlh);

	nla_for_each_attr(curr, head, len, rem) {
		if (curr->nla_type == LS_NLA_TYPE_DGID)
			memcpy(&gid, nla_data(curr), nla_len(curr));
	}

	spin_lock_bh(&lock);
	list_for_each_entry(req, &req_list, list) {
		if (nlh->nlmsg_seq != req->seq)
			continue;
		/* We set the DGID part, the rest was set earlier */
		rdma_addr_set_dgid(req->addr, &gid);
		req->status = 0;
		found = 1;
		break;
	}
	spin_unlock_bh(&lock);

	if (!found)
		pr_info("Couldn't find request waiting for DGID: %pI6\n",
			&gid);
}
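
/*
 * Netlink entry point, invoked when userspace answers an IP resolve
 * request. Replies that are themselves requests, or that did not arrive
 * over a netlink socket, are rejected.
 */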
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	if (ib_nl_is_good_ip_resp(nlh))
		ib_nl_process_good_ip_rsep(nlh);

	return 0;
}
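
/*
 * Multicast an IP resolve request to the netlink LS group so that a
 * userspace helper can supply the DGID. It deliberately returns -ENODATA
 * after sending so the request stays queued until the response arrives.
 */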
static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
			     const void *daddr, u32 seq, u16 family)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	struct rdma_ls_ip_resolve_header *header;
	void *data;
	size_t size;
	int attrtype;
	int len;

	if (family == AF_INET) {
		size = sizeof(struct in_addr);
		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
	} else {
		size = sizeof(struct in6_addr);
		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
	}

	len = nla_total_size(sizeof(size));
	len += NLMSG_ALIGN(sizeof(*header));

	skb = nlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -ENODATA;
	}

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	header->ifindex = dev_addr->bound_dev_if;
	nla_put(skb, attrtype, size, daddr);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);
	rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL);

	/* Make the request retry, so when we get the response from userspace
	 * we will have something.
	 */
	return -ENODATA;
}
int rdma_addr_size(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
	case AF_IB:
		return sizeof(struct sockaddr_ib);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(rdma_addr_size);

int rdma_addr_size_in6(struct sockaddr_in6 *addr)
{
	int ret = rdma_addr_size((struct sockaddr *) addr);

	return ret <= sizeof(*addr) ? ret : 0;
}
EXPORT_SYMBOL(rdma_addr_size_in6);

int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
{
	int ret = rdma_addr_size((struct sockaddr *) addr);

	return ret <= sizeof(*addr) ? ret : 0;
}
EXPORT_SYMBOL(rdma_addr_size_kss);
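
/*
 * Illustrative sketch, not part of the original file: a caller copying a
 * user-supplied address (the names cmd.addr and dst here are hypothetical)
 * would typically validate it with rdma_addr_size_kss() first:
 *
 *	int len = rdma_addr_size_kss(&cmd.addr);
 *
 *	if (!len)
 *		return -EINVAL;
 *	memcpy(&dst, &cmd.addr, len);
 *
 * A zero return means the address family is unknown or its sockaddr would
 * not fit in the storage, so the address must be rejected.
 */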
/**
 * rdma_copy_src_l2_addr - Copy netdevice source addresses
 * @dev_addr:	Destination address pointer where to copy the addresses
 * @dev:	Netdevice whose source addresses to copy
 *
 * rdma_copy_src_l2_addr() copies source addresses from the specified netdevice.
 * This includes unicast address, broadcast address, device type and
 * interface index.
 */
void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
			   const struct net_device *dev)
{
	dev_addr->dev_type = dev->type;
	memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
	memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
	dev_addr->bound_dev_if = dev->ifindex;
}
EXPORT_SYMBOL(rdma_copy_src_l2_addr);
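
/*
 * Find the netdevice configured with the given source IP. The caller must
 * hold the RCU read lock; returns ERR_PTR(-EADDRNOTAVAIL) when no device
 * in @net owns the address.
 */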
static struct net_device *
rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
{
	struct net_device *dev = NULL;
	int ret = -EADDRNOTAVAIL;

	switch (src_in->sa_family) {
	case AF_INET:
		dev = __ip_dev_find(net,
				    ((const struct sockaddr_in *)src_in)->sin_addr.s_addr,
				    false);
		if (dev)
			ret = 0;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		for_each_netdev_rcu(net, dev) {
			if (ipv6_chk_addr(net,
					  &((const struct sockaddr_in6 *)src_in)->sin6_addr,
					  dev, 1)) {
				ret = 0;
				break;
			}
		}
		break;
#endif
	}
	return ret ? ERR_PTR(ret) : dev;
}
int rdma_translate_ip(const struct sockaddr *addr,
		      struct rdma_dev_addr *dev_addr)
{
	struct net_device *dev;

	if (dev_addr->bound_dev_if) {
		dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
		if (!dev)
			return -ENODEV;
		rdma_copy_src_l2_addr(dev_addr, dev);
		dev_put(dev);
		return 0;
	}

	rcu_read_lock();
	dev = rdma_find_ndev_for_src_ip_rcu(dev_addr->net, addr);
	if (!IS_ERR(dev))
		rdma_copy_src_l2_addr(dev_addr, dev);
	rcu_read_unlock();

	return PTR_ERR_OR_ZERO(dev);
}
EXPORT_SYMBOL(rdma_translate_ip);
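
/*
 * (Re)arm the delayed work for @req so it fires at the absolute jiffies
 * value @time; a time already in the past schedules it immediately.
 */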
static void set_timeout(struct addr_req *req, unsigned long time)
{
	unsigned long delay;

	delay = time - jiffies;
	if ((long)delay < 0)
		delay = 0;

	mod_delayed_work(addr_wq, &req->work, delay);
}

static void queue_req(struct addr_req *req)
{
	spin_lock_bh(&lock);
	list_add_tail(&req->list, &req_list);
	set_timeout(req, req->timeout);
	spin_unlock_bh(&lock);
}
static int ib_nl_fetch_ha(struct rdma_dev_addr *dev_addr,
			  const void *daddr, u32 seq, u16 family)
{
	if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
		return -EADDRNOTAVAIL;

	return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
}
static int dst_fetch_ha(const struct dst_entry *dst,
			struct rdma_dev_addr *dev_addr,
			const void *daddr)
{
	struct neighbour *n;
	int ret = 0;

	n = dst_neigh_lookup(dst, daddr);
	if (!n)
		return -ENODATA;

	if (!(n->nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		ret = -ENODATA;
	} else {
		neigh_ha_snapshot(dev_addr->dst_dev_addr, n, dst->dev);
	}

	neigh_release(n);

	return ret;
}
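
/* Does the route leave the local subnet through a gateway? */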
static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
{
	if (family == AF_INET)
		return dst_rtable(dst)->rt_uses_gateway;

	return dst_rt6_info(dst)->rt6i_flags & RTF_GATEWAY;
}
static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
		    const struct sockaddr *dst_in, u32 seq)
{
	const struct sockaddr_in *dst_in4 =
		(const struct sockaddr_in *)dst_in;
	const struct sockaddr_in6 *dst_in6 =
		(const struct sockaddr_in6 *)dst_in;
	const void *daddr = (dst_in->sa_family == AF_INET) ?
		(const void *)&dst_in4->sin_addr.s_addr :
		(const void *)&dst_in6->sin6_addr;
	sa_family_t family = dst_in->sa_family;

	/* If we have a gateway in IB mode then it must be an IB network */
	if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
		return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
	else
		return dst_fetch_ha(dst, dev_addr, daddr);
}
static int addr4_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct rtable **prt)
{
	struct sockaddr_in *src_in = (struct sockaddr_in *)src_sock;
	const struct sockaddr_in *dst_in =
			(const struct sockaddr_in *)dst_sock;
	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct rtable *rt;
	struct flowi4 fl4;
	int ret;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst_ip;
	fl4.saddr = src_ip;
	fl4.flowi4_oif = addr->bound_dev_if;
	rt = ip_route_output_key(addr->net, &fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;

	src_in->sin_addr.s_addr = fl4.saddr;

	addr->hoplimit = ip4_dst_hoplimit(&rt->dst);

	*prt = rt;
	return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
static int addr6_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct dst_entry **pdst)
{
	struct sockaddr_in6 *src_in = (struct sockaddr_in6 *)src_sock;
	const struct sockaddr_in6 *dst_in =
				(const struct sockaddr_in6 *)dst_sock;
	struct flowi6 fl6;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof fl6);
	fl6.daddr = dst_in->sin6_addr;
	fl6.saddr = src_in->sin6_addr;
	fl6.flowi6_oif = addr->bound_dev_if;

	dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	if (ipv6_addr_any(&src_in->sin6_addr))
		src_in->sin6_addr = fl6.saddr;

	addr->hoplimit = ip6_dst_hoplimit(dst);

	*pdst = dst;
	return 0;
}
#else
static int addr6_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct dst_entry **pdst)
{
	return -EADDRNOTAVAIL;
}
#endif
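
/*
 * Fill in the destination L2 address: loopback simply copies the source
 * address, while ARP-capable devices resolve through the neighbour table
 * (or the netlink helper for IB networks behind a gateway).
 */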
static int addr_resolve_neigh(const struct dst_entry *dst,
			      const struct sockaddr *dst_in,
			      struct rdma_dev_addr *addr,
			      unsigned int ndev_flags,
			      u32 seq)
{
	int ret = 0;

	if (ndev_flags & IFF_LOOPBACK) {
		memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
	} else {
		if (!(ndev_flags & IFF_NOARP)) {
			/* If the device doesn't do ARP internally */
			ret = fetch_ha(dst, addr, dst_in, seq);
		}
	}
	return ret;
}
static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
			    const struct sockaddr *dst_in,
			    const struct dst_entry *dst,
			    const struct net_device *ndev)
{
	int ret = 0;

	if (dst->dev->flags & IFF_LOOPBACK)
		ret = rdma_translate_ip(dst_in, dev_addr);
	else
		rdma_copy_src_l2_addr(dev_addr, dst->dev);

	/*
	 * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
	 * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the
	 * network type accordingly.
	 */
	if (has_gateway(dst, dst_in->sa_family) &&
	    ndev->type != ARPHRD_INFINIBAND)
		dev_addr->network = dst_in->sa_family == AF_INET ?
					RDMA_NETWORK_IPV4 :
					RDMA_NETWORK_IPV6;
	else
		dev_addr->network = RDMA_NETWORK_IB;

	return ret;
}
static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
				 unsigned int *ndev_flags,
				 const struct sockaddr *dst_in,
				 const struct dst_entry *dst)
{
	struct net_device *ndev = READ_ONCE(dst->dev);

	*ndev_flags = ndev->flags;
	/* A physical device must be the RDMA device to use */
	if (ndev->flags & IFF_LOOPBACK) {
		/*
		 * RDMA (IB/RoCE, iWarp) doesn't run on the lo interface or a
		 * loopback IP address. So if the route resolves to the
		 * loopback interface, translate that to a real ndev based on
		 * a non-loopback IP address.
		 */
		ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
		if (IS_ERR(ndev))
			return -ENODEV;
	}

	return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
}
static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
{
	struct net_device *ndev;

	ndev = rdma_read_gid_attr_ndev_rcu(addr->sgid_attr);
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);

	/*
	 * Since we are holding the rcu, reading net and ifindex
	 * is safe without any additional reference, because
	 * change_net_namespace() in net/core/dev.c does an rcu sync
	 * after it changes the state to IFF_DOWN and before
	 * updating the netdev fields {net, ifindex}.
	 */
	addr->net = dev_net(ndev);
	addr->bound_dev_if = ndev->ifindex;
	return 0;
}
static void rdma_addr_set_net_defaults(struct rdma_dev_addr *addr)
{
	addr->net = &init_net;
	addr->bound_dev_if = 0;
}
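
/*
 * Core resolution step shared by all callers: pick the IPv4 or IPv6
 * route, derive the source L2 address from the egress netdevice, and
 * optionally resolve the destination L2 address as well.
 */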
static int addr_resolve(struct sockaddr *src_in,
			const struct sockaddr *dst_in,
			struct rdma_dev_addr *addr,
			bool resolve_neigh,
			bool resolve_by_gid_attr,
			u32 seq)
{
	struct dst_entry *dst = NULL;
	unsigned int ndev_flags = 0;
	struct rtable *rt = NULL;
	int ret;

	if (!addr->net) {
		pr_warn_ratelimited("%s: missing namespace\n", __func__);
		return -EINVAL;
	}

	rcu_read_lock();
	if (resolve_by_gid_attr) {
		if (!addr->sgid_attr) {
			rcu_read_unlock();
			pr_warn_ratelimited("%s: missing gid_attr\n", __func__);
			return -EINVAL;
		}
		/*
		 * If the request is for a specific gid attribute of the
		 * rdma_dev_addr, derive net from the netdevice of the
		 * GID attribute.
		 */
		ret = set_addr_netns_by_gid_rcu(addr);
		if (ret) {
			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	if (src_in->sa_family == AF_INET) {
		ret = addr4_resolve(src_in, dst_in, addr, &rt);
		dst = &rt->dst;
	} else {
		ret = addr6_resolve(src_in, dst_in, addr, &dst);
	}
	if (ret)
		goto done;

	rcu_read_lock();
	ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
	rcu_read_unlock();

	/*
	 * Resolve neighbor destination address if requested and
	 * only if src addr translation didn't fail.
	 */
	if (!ret && resolve_neigh)
		ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);

	if (src_in->sa_family == AF_INET)
		ip_rt_put(rt);
	else
		dst_release(dst);
done:
	/*
	 * Clear the addr net to go back to its original state, only if it was
	 * derived from GID attribute in this context.
	 */
	if (resolve_by_gid_attr)
		rdma_addr_set_net_defaults(addr);

	return ret;
}
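
/* Work item: retry resolution until it succeeds, times out, or is canceled. */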
static void process_one_req(struct work_struct *_work)
{
	struct addr_req *req;
	struct sockaddr *src_in, *dst_in;

	req = container_of(_work, struct addr_req, work.work);

	if (req->status == -ENODATA) {
		src_in = (struct sockaddr *)&req->src_addr;
		dst_in = (struct sockaddr *)&req->dst_addr;
		req->status = addr_resolve(src_in, dst_in, req->addr,
					   true, req->resolve_by_gid_attr,
					   req->seq);
		if (req->status && time_after_eq(jiffies, req->timeout)) {
			req->status = -ETIMEDOUT;
		} else if (req->status == -ENODATA) {
			/* requeue the work for retrying again */
			spin_lock_bh(&lock);
			if (!list_empty(&req->list))
				set_timeout(req, req->timeout);
			spin_unlock_bh(&lock);
			return;
		}
	}

	req->callback(req->status, (struct sockaddr *)&req->src_addr,
		req->addr, req->context);
	req->callback = NULL;

	spin_lock_bh(&lock);
	/*
	 * Although the work will normally have been canceled by the workqueue,
	 * it can still be requeued as long as it is on the req_list.
	 */
	cancel_delayed_work(&req->work);
	if (!list_empty(&req->list)) {
		list_del_init(&req->list);
		kfree(req);
	}
	spin_unlock_bh(&lock);
}
int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
		    struct rdma_dev_addr *addr, unsigned long timeout_ms,
		    void (*callback)(int status, struct sockaddr *src_addr,
				     struct rdma_dev_addr *addr, void *context),
		    bool resolve_by_gid_attr, void *context)
{
	struct sockaddr *src_in, *dst_in;
	struct addr_req *req;
	int ret = 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	src_in = (struct sockaddr *) &req->src_addr;
	dst_in = (struct sockaddr *) &req->dst_addr;

	if (src_addr) {
		if (src_addr->sa_family != dst_addr->sa_family) {
			ret = -EINVAL;
			goto err;
		}
		memcpy(src_in, src_addr, rdma_addr_size(src_addr));
	} else {
		src_in->sa_family = dst_addr->sa_family;
	}

	memcpy(dst_in, dst_addr, rdma_addr_size(dst_addr));
	req->addr = addr;
	req->callback = callback;
	req->context = context;
	req->resolve_by_gid_attr = resolve_by_gid_attr;
	INIT_DELAYED_WORK(&req->work, process_one_req);
	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);

	req->status = addr_resolve(src_in, dst_in, addr, true,
				   req->resolve_by_gid_attr, req->seq);
	switch (req->status) {
	case 0:
		req->timeout = jiffies;
		queue_req(req);
		break;
	case -ENODATA:
		req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
		queue_req(req);
		break;
	default:
		ret = req->status;
		goto err;
	}
	return ret;
err:
	kfree(req);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);
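
/*
 * Illustrative sketch, not part of the original file: rdma_resolve_ip()
 * is asynchronous, so a caller needing a synchronous answer typically
 * pairs it with a completion (src, dst and dev_addr are assumed to be
 * set up by the caller):
 *
 *	struct resolve_cb_context ctx;
 *	int ret;
 *
 *	init_completion(&ctx.comp);
 *	ret = rdma_resolve_ip(src, dst, &dev_addr, 1000, resolve_cb,
 *			      true, &ctx);
 *	if (!ret)
 *		wait_for_completion(&ctx.comp);
 *
 * rdma_addr_find_l2_eth_by_grh() below implements exactly this pattern.
 */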
int roce_resolve_route_from_path(struct sa_path_rec *rec,
				 const struct ib_gid_attr *attr)
{
	union {
		struct sockaddr     _sockaddr;
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid, dgid;
	struct rdma_dev_addr dev_addr = {};
	int ret;

	if (rec->roce.route_resolved)
		return 0;

	rdma_gid2ip((struct sockaddr *)&sgid, &rec->sgid);
	rdma_gid2ip((struct sockaddr *)&dgid, &rec->dgid);

	if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
		return -EINVAL;

	if (!attr || !attr->ndev)
		return -EINVAL;

	dev_addr.net = &init_net;
	dev_addr.sgid_attr = attr;

	ret = addr_resolve((struct sockaddr *)&sgid, (struct sockaddr *)&dgid,
			   &dev_addr, false, true, 0);
	if (ret)
		return ret;

	if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
	     dev_addr.network == RDMA_NETWORK_IPV6) &&
	    rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
		return -EINVAL;

	rec->roce.route_resolved = true;
	return 0;
}
/**
 * rdma_addr_cancel - Cancel resolve ip request
 * @addr:	Pointer to address structure given previously
 *		during rdma_resolve_ip().
 *
 * rdma_addr_cancel() is a synchronous function which cancels any pending
 * request, if there is one.
 */
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
	struct addr_req *req, *temp_req;
	struct addr_req *found = NULL;

	spin_lock_bh(&lock);
	list_for_each_entry_safe(req, temp_req, &req_list, list) {
		if (req->addr == addr) {
			/*
			 * Removing from the list means we take ownership of
			 * the request.
			 */
			list_del_init(&req->list);
			found = req;
			break;
		}
	}
	spin_unlock_bh(&lock);

	if (!found)
		return;

	/*
	 * sync canceling the work after removing it from the req_list
	 * guarantees no work is running and none will be started.
	 */
	cancel_delayed_work_sync(&found->work);
	kfree(found);
}
EXPORT_SYMBOL(rdma_addr_cancel);
struct resolve_cb_context {
	struct completion comp;
	int status;
};

static void resolve_cb(int status, struct sockaddr *src_addr,
		       struct rdma_dev_addr *addr, void *context)
{
	((struct resolve_cb_context *)context)->status = status;
	complete(&((struct resolve_cb_context *)context)->comp);
}
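
/*
 * Resolve the destination MAC and hop limit for a GRH-addressed packet by
 * converting the GIDs to IP addresses and running a synchronous resolve.
 */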
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
				 const union ib_gid *dgid,
				 u8 *dmac, const struct ib_gid_attr *sgid_attr,
				 int *hoplimit)
{
	struct rdma_dev_addr dev_addr;
	struct resolve_cb_context ctx;
	union {
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;
	int ret;

	rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
	rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);

	memset(&dev_addr, 0, sizeof(dev_addr));
	dev_addr.net = &init_net;
	dev_addr.sgid_attr = sgid_attr;

	init_completion(&ctx.comp);
	ret = rdma_resolve_ip((struct sockaddr *)&sgid_addr,
			      (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
			      resolve_cb, true, &ctx);
	if (ret)
		return ret;

	wait_for_completion(&ctx.comp);

	ret = ctx.status;
	if (ret)
		return ret;

	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
	*hoplimit = dev_addr.hoplimit;
	return 0;
}
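
/*
 * Kick every pending request as soon as a neighbour entry becomes valid,
 * instead of waiting for its next retry interval.
 */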
static int netevent_callback(struct notifier_block *self, unsigned long event,
	void *ctx)
{
	struct addr_req *req;

	if (event == NETEVENT_NEIGH_UPDATE) {
		struct neighbour *neigh = ctx;

		if (neigh->nud_state & NUD_VALID) {
			spin_lock_bh(&lock);
			list_for_each_entry(req, &req_list, list)
				set_timeout(req, jiffies);
			spin_unlock_bh(&lock);
		}
	}
	return 0;
}
= {
864 .notifier_call
= netevent_callback
869 addr_wq
= alloc_ordered_workqueue("ib_addr", 0);
873 register_netevent_notifier(&nb
);
878 void addr_cleanup(void)
880 unregister_netevent_notifier(&nb
);
881 destroy_workqueue(addr_wq
);
882 WARN_ON(!list_empty(&req_list
));