// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES		32

#define NFP_TUN_PRE_TUN_RULE_LIMIT	32
#define NFP_TUN_PRE_TUN_RULE_DEL	BIT(0)
#define NFP_TUN_PRE_TUN_IDX_BIT		BIT(3)
#define NFP_TUN_PRE_TUN_IPV6_BIT	BIT(7)

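/* NFP_TUN_PRE_TUN_RULE_DEL is carried in the flags word of a pre-tunnel
 * rule delete message, while NFP_TUN_PRE_TUN_IDX_BIT and
 * NFP_TUN_PRE_TUN_IPV6_BIT are folded into the offloaded MAC index that
 * is sent as the rule's port_idx (see nfp_flower_xmit_pre_tun_flow()).
 */
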
/**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags:		options for the rule offload
 * @port_idx:		index of destination MAC address for the rule
 * @vlan_tci:		VLAN info associated with MAC
 * @host_ctx_id:	stats context of rule to update
 */
struct nfp_tun_pre_tun_rule {
	__be32 flags;
	__be16 port_idx;
	__be16 vlan_tci;
	__be32 host_ctx_id;
};

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels reported in the message
 * @flags:		options part of the request
 * @tun_info.ipv4:		dest IPv4 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:		reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels reported in the message
 * @flags:		options part of the request
 * @tun_info.ipv6:		dest IPv6 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:		reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns_v6 {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info_v6 {
		struct in6_addr ipv6;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv4_addr:		destination ipv4 address for route
 * @reserved:		reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};

/**
 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv6_addr:		destination ipv6 address for route
 */
struct nfp_tun_req_route_ipv6 {
	__be32 ingress_port;
	struct in6_addr ipv6_addr;
};

/**
 * struct nfp_offloaded_route - routes that are offloaded to the NFP
 * @list:	list pointer
 * @ip_add:	destination of route - can be IPv4 or IPv6
 */
struct nfp_offloaded_route {
	struct list_head list;
	u8 ip_add[];
};

#define NFP_FL_IPV4_ADDRS_MAX		32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:	IP address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};

#define NFP_FL_IPV6_ADDRS_MAX		4

/**
 * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv6_addr:	array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
 */
struct nfp_tun_ipv6_addr {
	__be32 count;
	struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:	MAC address offload options
 * @count:	number of MAC addresses in the message (should be 1)
 * @index:	index of MAC address in the lookup table
 * @addr:	interface MAC address
 */
struct nfp_tun_mac_addr_offload {
	__be16 flags;
	__be16 count;
	__be16 index;
	u8 addr[ETH_ALEN];
};

/**
 * struct nfp_neigh_update_work - update neighbour information to nfp
 * @work:	Work queue for writing neigh to the nfp
 * @n:		neighbour entry
 * @app:	Back pointer to app
 */
struct nfp_neigh_update_work {
	struct work_struct work;
	struct neighbour *n;
	struct nfp_app *app;
};

enum nfp_flower_mac_offload_cmd {
	NFP_TUNNEL_MAC_OFFLOAD_ADD =	0,
	NFP_TUNNEL_MAC_OFFLOAD_DEL =	1,
	NFP_TUNNEL_MAC_OFFLOAD_MOD =	2,
};

#define NFP_MAX_MAC_INDEX	0xff

/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:		Hashtable entry
 * @addr:		Offloaded MAC address
 * @index:		Offloaded index for given MAC address
 * @ref_count:		Number of devs using this MAC address
 * @repr_list:		List of reprs sharing this MAC address
 * @bridge_count:	Number of bridge/internal devs with MAC
 */
struct nfp_tun_offloaded_mac {
	struct rhash_head ht_node;
	u8 addr[ETH_ALEN];
	u16 index;
	int ref_count;
	struct list_head repr_list;
	int bridge_count;
};

static const struct rhashtable_params offloaded_macs_params = {
	.key_offset	= offsetof(struct nfp_tun_offloaded_mac, addr),
	.head_offset	= offsetof(struct nfp_tun_offloaded_mac, ht_node),
	.key_len	= ETH_ALEN,
	.automatic_shrinking	= true,
};

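/* Firmware periodically reports which tunnels have carried traffic.
 * Poke the kernel neighbour entry for each active destination so its
 * "used" timestamp is refreshed and the entry is not garbage collected
 * while the offloaded tunnel is still passing traffic.
 */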
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_active_tuns *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	__be32 ipv4_addr;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
}

void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct nfp_tun_active_tuns_v6 *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	void *ipv6_add;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_IPV6_ADDRS_MAX) {
		nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv6_add = &payload->tun_info[i].ipv6;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
#endif
}

static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
			 gfp_t flag)
{
	struct nfp_flower_priv *priv = app->priv;
	struct sk_buff *skb;
	unsigned char *msg;

	if (!(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) &&
	    (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
	     mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
		plen -= sizeof(struct nfp_tun_neigh_ext);

	if (!(priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) &&
	    (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
	     mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
		plen -= sizeof(struct nfp_tun_neigh_lag);

	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

	nfp_ctrl_tx(app->ctrl, skb);
	return 0;
}

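/* Cross-link a pre-tunnel rule and a cached neighbour entry when the
 * rule's local/remote MACs match the neighbour's source/destination
 * MACs, and patch the rule's stats context and VLAN info into the
 * neighbour payload that is sent to the firmware.
 */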
static void
nfp_tun_mutual_link(struct nfp_predt_entry *predt,
		    struct nfp_neigh_entry *neigh)
{
	struct nfp_fl_payload *flow_pay = predt->flow_pay;
	struct nfp_tun_neigh_ext *ext;
	struct nfp_tun_neigh *common;

	if (flow_pay->pre_tun_rule.is_ipv6 != neigh->is_ipv6)
		return;

	/* In the case of bonding it is possible that there might already
	 * be a flow linked (as the MAC address gets shared). If a flow
	 * is already linked just return.
	 */
	if (neigh->flow)
		return;

	common = neigh->is_ipv6 ?
		 &((struct nfp_tun_neigh_v6 *)neigh->payload)->common :
		 &((struct nfp_tun_neigh_v4 *)neigh->payload)->common;
	ext = neigh->is_ipv6 ?
	      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
	      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;

	if (memcmp(flow_pay->pre_tun_rule.loc_mac,
		   common->src_addr, ETH_ALEN) ||
	    memcmp(flow_pay->pre_tun_rule.rem_mac,
		   common->dst_addr, ETH_ALEN))
		return;

	list_add(&neigh->list_head, &predt->nn_list);
	neigh->flow = predt;
	ext->host_ctx = flow_pay->meta.host_ctx_id;
	ext->vlan_tci = flow_pay->pre_tun_rule.vlan_tci;
	ext->vlan_tpid = flow_pay->pre_tun_rule.vlan_tpid;
}

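/* Attempt to link a new neighbour entry against every offloaded
 * pre-tunnel rule currently known to the driver.
 */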
static void
nfp_tun_link_predt_entries(struct nfp_app *app,
			   struct nfp_neigh_entry *nn_entry)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_predt_entry *predt, *tmp;

	list_for_each_entry_safe(predt, tmp, &priv->predt_list, list_head) {
		nfp_tun_mutual_link(predt, nn_entry);
	}
}

void nfp_tun_link_and_update_nn_entries(struct nfp_app *app,
					struct nfp_predt_entry *predt)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_neigh_entry *nn_entry;
	struct rhashtable_iter iter;
	size_t neigh_size;
	u8 type;

	rhashtable_walk_enter(&priv->neigh_table, &iter);
	rhashtable_walk_start(&iter);
	while ((nn_entry = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(nn_entry))
			continue;
		nfp_tun_mutual_link(predt, nn_entry);
		neigh_size = nn_entry->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = nn_entry->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					   NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

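/* Teardown helper: mark every cached neighbour's ext fields as invalid
 * (all-ones sentinels), push a final update to the firmware and free
 * the entries.
 */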
static void nfp_tun_cleanup_nn_entries(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_neigh_entry *neigh;
	struct nfp_tun_neigh_ext *ext;
	struct rhashtable_iter iter;
	size_t neigh_size;
	u8 type;

	rhashtable_walk_enter(&priv->neigh_table, &iter);
	rhashtable_walk_start(&iter);
	while ((neigh = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(neigh))
			continue;
		ext = neigh->is_ipv6 ?
		      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
		      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);

		neigh_size = neigh->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
					 GFP_ATOMIC);

		rhashtable_remove_fast(&priv->neigh_table, &neigh->ht_node,
				       neigh_table_params);
		if (neigh->flow)
			list_del(&neigh->list_head);
		kfree(neigh);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
					  struct nfp_predt_entry *predt)
{
	struct nfp_neigh_entry *neigh, *tmp;
	struct nfp_tun_neigh_ext *ext;
	size_t neigh_size;
	u8 type;

	list_for_each_entry_safe(neigh, tmp, &predt->nn_list, list_head) {
		ext = neigh->is_ipv6 ?
		      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
		      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
		neigh->flow = NULL;
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);
		list_del(&neigh->list_head);
		neigh_size = neigh->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
					 GFP_ATOMIC);
	}
}

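/* Write a neighbour entry to the firmware. Three cases are handled:
 * a valid neighbour with no cached entry is added, a cached entry whose
 * neighbour became invalid is deleted (after re-triggering ARP to
 * verify the state), and an existing cached entry is re-sent when the
 * destination MAC changes or an update is explicitly requested.
 */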
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
		    void *flow, struct neighbour *neigh, bool is_ipv6,
		    bool override)
{
	bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;
	size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) :
			    sizeof(struct nfp_tun_neigh_v4);
	unsigned long cookie = (unsigned long)neigh;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_neigh_lag lag_info;
	struct nfp_neigh_entry *nn_entry;
	u32 port_id;
	u8 mtype;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
	if (!port_id)
		return;

	if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT) {
		memset(&lag_info, 0, sizeof(struct nfp_tun_neigh_lag));
		nfp_flower_lag_get_info_from_netdev(app, netdev, &lag_info);
	}

	spin_lock_bh(&priv->predt_lock);
	nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
					  neigh_table_params);
	if (!nn_entry && !neigh_invalid) {
		struct nfp_tun_neigh_ext *ext;
		struct nfp_tun_neigh_lag *lag;
		struct nfp_tun_neigh *common;

		nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size,
				   GFP_ATOMIC);
		if (!nn_entry)
			goto err;

		nn_entry->payload = (char *)&nn_entry[1];
		nn_entry->neigh_cookie = cookie;
		nn_entry->is_ipv6 = is_ipv6;
		nn_entry->flow = NULL;

		if (is_ipv6) {
			struct flowi6 *flowi6 = (struct flowi6 *)flow;
			struct nfp_tun_neigh_v6 *payload;

			payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
			payload->src_ipv6 = flowi6->saddr;
			payload->dst_ipv6 = flowi6->daddr;
			common = &payload->common;
			ext = &payload->ext;
			lag = &payload->lag;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
		} else {
			struct flowi4 *flowi4 = (struct flowi4 *)flow;
			struct nfp_tun_neigh_v4 *payload;

			payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
			payload->src_ipv4 = flowi4->saddr;
			payload->dst_ipv4 = flowi4->daddr;
			common = &payload->common;
			ext = &payload->ext;
			lag = &payload->lag;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		}
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);
		ether_addr_copy(common->src_addr, netdev->dev_addr);
		neigh_ha_snapshot(common->dst_addr, neigh, netdev);

		if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT)
			memcpy(lag, &lag_info, sizeof(struct nfp_tun_neigh_lag));
		common->port_id = cpu_to_be32(port_id);

		if (rhashtable_insert_fast(&priv->neigh_table,
					   &nn_entry->ht_node,
					   neigh_table_params))
			goto err;

		nfp_tun_link_predt_entries(app, nn_entry);
		nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);
	} else if (nn_entry && neigh_invalid) {
		if (is_ipv6) {
			struct flowi6 *flowi6 = (struct flowi6 *)flow;
			struct nfp_tun_neigh_v6 *payload;

			payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
			memset(payload, 0, sizeof(struct nfp_tun_neigh_v6));
			payload->dst_ipv6 = flowi6->daddr;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
		} else {
			struct flowi4 *flowi4 = (struct flowi4 *)flow;
			struct nfp_tun_neigh_v4 *payload;

			payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
			memset(payload, 0, sizeof(struct nfp_tun_neigh_v4));
			payload->dst_ipv4 = flowi4->daddr;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		}
		/* Trigger ARP to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		rhashtable_remove_fast(&priv->neigh_table,
				       &nn_entry->ht_node,
				       neigh_table_params);

		nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);

		if (nn_entry->flow)
			list_del(&nn_entry->list_head);
		kfree(nn_entry);
	} else if (nn_entry && !neigh_invalid) {
		struct nfp_tun_neigh *common;
		u8 dst_addr[ETH_ALEN];
		bool is_mac_change;

		if (is_ipv6) {
			struct nfp_tun_neigh_v6 *payload;

			payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
			common = &payload->common;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
		} else {
			struct nfp_tun_neigh_v4 *payload;

			payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
			common = &payload->common;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		}

		ether_addr_copy(dst_addr, common->dst_addr);
		neigh_ha_snapshot(common->dst_addr, neigh, netdev);
		is_mac_change = !ether_addr_equal(dst_addr, common->dst_addr);
		if (override || is_mac_change) {
			if (is_mac_change && nn_entry->flow) {
				list_del(&nn_entry->list_head);
				nn_entry->flow = NULL;
			}
			nfp_tun_link_predt_entries(app, nn_entry);
			nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
						 nn_entry->payload,
						 GFP_ATOMIC);
		}
	}

	spin_unlock_bh(&priv->predt_lock);
	return;

err:
	kfree(nn_entry);
	spin_unlock_bh(&priv->predt_lock);
	nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n");
}

static void
nfp_tun_release_neigh_update_work(struct nfp_neigh_update_work *update_work)
{
	neigh_release(update_work->n);
	kfree(update_work);
}

static void nfp_tun_neigh_update(struct work_struct *work)
{
	struct nfp_neigh_update_work *update_work;
	struct nfp_app *app;
	struct neighbour *n;
	bool neigh_invalid;
	int err;

	update_work = container_of(work, struct nfp_neigh_update_work, work);
	app = update_work->app;
	n = update_work->n;

	if (!nfp_flower_get_port_id_from_netdev(app, n->dev))
		goto out;

#if IS_ENABLED(CONFIG_INET)
	neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;
	if (n->tbl->family == AF_INET6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct flowi6 flow6 = {};

		flow6.daddr = *(struct in6_addr *)n->primary_key;
		if (!neigh_invalid) {
			struct dst_entry *dst;
			/* Use ipv6_dst_lookup_flow to populate flow6->saddr
			 * and other fields. This information is only needed
			 * for new entries, lookup can be skipped when an entry
			 * gets invalidated - as only the daddr is needed for
			 * deleting.
			 */
			dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
						  &flow6, NULL);
			if (IS_ERR(dst))
				goto out;

			dst_release(dst);
		}
		nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
#endif /* CONFIG_IPV6 */
	} else {
		struct flowi4 flow4 = {};

		flow4.daddr = *(__be32 *)n->primary_key;
		if (!neigh_invalid) {
			struct rtable *rt;
			/* Use ip_route_output_key to populate flow4->saddr and
			 * other fields. This information is only needed for
			 * new entries, lookup can be skipped when an entry
			 * gets invalidated - as only the daddr is needed for
			 * deleting.
			 */
			rt = ip_route_output_key(dev_net(n->dev), &flow4);
			err = PTR_ERR_OR_ZERO(rt);
			if (err)
				goto out;

			ip_rt_put(rt);
		}
		nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
	}
#endif /* CONFIG_INET */
out:
	nfp_tun_release_neigh_update_work(update_work);
}

static struct nfp_neigh_update_work *
nfp_tun_alloc_neigh_update_work(struct nfp_app *app, struct neighbour *n)
{
	struct nfp_neigh_update_work *update_work;

	update_work = kzalloc(sizeof(*update_work), GFP_ATOMIC);
	if (!update_work)
		return NULL;

	INIT_WORK(&update_work->work, nfp_tun_neigh_update);
	neigh_hold(n);
	update_work->n = n;
	update_work->app = app;

	return update_work;
}

static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nfp_neigh_update_work *update_work;
	struct nfp_flower_priv *app_priv;
	struct netevent_redirect *redir;
	struct neighbour *n;
	struct nfp_app *app;

	switch (event) {
	case NETEVENT_REDIRECT:
		redir = (struct netevent_redirect *)ptr;
		n = redir->neigh;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = (struct neighbour *)ptr;
		break;
	default:
		return NOTIFY_DONE;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
#else
	if (n->tbl != &arp_tbl)
#endif
		return NOTIFY_DONE;

	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
	app = app_priv->app;
	update_work = nfp_tun_alloc_neigh_update_work(app, n);
	if (!update_work)
		return NOTIFY_DONE;

	queue_work(system_highpri_wq, &update_work->work);

	return NOTIFY_DONE;
}

void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv4 *payload;
	struct net_device *netdev;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct rtable *rt;
	int err;

	payload = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto fail_rcu_unlock;

	flow.daddr = payload->ipv4_addr;
	flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup on same namespace as ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto fail_rcu_unlock;
#else
	goto fail_rcu_unlock;
#endif

	/* Get the neighbour entry for the lookup */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	ip_rt_put(rt);
	if (!n)
		goto fail_rcu_unlock;
	rcu_read_unlock();

	nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
	neigh_release(n);
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv6 *payload;
	struct net_device *netdev;
	struct flowi6 flow = {};
	struct dst_entry *dst;
	struct neighbour *n;

	payload = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto fail_rcu_unlock;

	flow.daddr = payload->ipv6_addr;
	flow.flowi6_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
					      NULL);
	if (IS_ERR(dst))
		goto fail_rcu_unlock;
#else
	goto fail_rcu_unlock;
#endif

	n = dst_neigh_lookup(dst, &flow.daddr);
	dst_release(dst);
	if (!n)
		goto fail_rcu_unlock;
	rcu_read_unlock();

	nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
	neigh_release(n);
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
}

static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct nfp_tun_ipv4_addr payload;
	struct list_head *ptr, *storage;
	int count;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
	mutex_lock(&priv->tun.ipv4_off_lock);
	count = 0;
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
			mutex_unlock(&priv->tun.ipv4_off_lock);
			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
			return;
		}
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		payload.ipv4_addr[count++] = entry->ipv4_addr;
	}
	payload.count = cpu_to_be32(count);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
				 sizeof(struct nfp_tun_ipv4_addr),
				 &payload, GFP_KERNEL);
}

void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv4_off_lock);
			return;
		}
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv4_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return;
	}
	entry->ipv4_addr = ipv4;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count--;
			if (!entry->ref_count) {
				list_del(&entry->list);
				kfree(entry);
			}
			break;
		}
	}
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

static void nfp_tun_write_ipv6_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv6_addr_entry *entry;
	struct nfp_tun_ipv6_addr payload;
	int count = 0;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
	mutex_lock(&priv->tun.ipv6_off_lock);
	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
		if (count >= NFP_FL_IPV6_ADDRS_MAX) {
			nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
			break;
		}
		payload.ipv6_addr[count++] = entry->ipv6_addr;
	}
	mutex_unlock(&priv->tun.ipv6_off_lock);
	payload.count = cpu_to_be32(count);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
				 sizeof(struct nfp_tun_ipv6_addr),
				 &payload, GFP_KERNEL);
}

struct nfp_ipv6_addr_entry *
nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv6_addr_entry *entry;

	mutex_lock(&priv->tun.ipv6_off_lock);
	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
		if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv6_off_lock);
			return entry;
		}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv6_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return NULL;
	}
	entry->ipv6_addr = *ipv6;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
	mutex_unlock(&priv->tun.ipv6_off_lock);

	nfp_tun_write_ipv6_list(app);

	return entry;
}

void
nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
{
	struct nfp_flower_priv *priv = app->priv;
	bool freed = false;

	mutex_lock(&priv->tun.ipv6_off_lock);
	if (!--entry->ref_count) {
		list_del(&entry->list);
		kfree(entry);
		freed = true;
	}
	mutex_unlock(&priv->tun.ipv6_off_lock);

	if (freed)
		nfp_tun_write_ipv6_list(app);
}

static int
__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del)
{
	struct nfp_tun_mac_addr_offload payload;

	memset(&payload, 0, sizeof(payload));

	if (del)
		payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

	/* FW supports multiple MACs per cmsg but restrict to single. */
	payload.count = cpu_to_be16(1);
	payload.index = cpu_to_be16(idx);
	ether_addr_copy(payload.addr, mac);

	return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
					sizeof(struct nfp_tun_mac_addr_offload),
					&payload, GFP_KERNEL);
}

static bool nfp_tunnel_port_is_phy_repr(int port)
{
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
		return true;

	return false;
}

static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
	return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
	return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
	return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

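/* Offloaded MAC indexes encode the port type in the low byte and the
 * identifier in the upper bits: a physical repr uses its port id, e.g.
 * port 2 maps to (2 << 8) | PHYS_PORT type, while shared or non-repr
 * MACs use an ida-allocated global id, (id << 8) | OTHER_PORT type.
 */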
static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
				      offloaded_macs_params);
}

static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
					   struct net_device *netdev, bool mod)
{
	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;

		/* If modifying MAC, remove repr from old list first. */
		if (mod)
			list_del(&repr_priv->mac_list);

		list_add_tail(&repr_priv->mac_list, &entry->repr_list);
	} else if (nfp_flower_is_supported_bridge(netdev)) {
		entry->bridge_count++;
	}

	entry->ref_count++;
}

static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  int port, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_offloaded_mac *entry;
	int ida_idx = -1, err;
	u16 nfp_mac_idx = 0;

	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
	if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) {
		if (entry->bridge_count ||
		    !nfp_flower_is_supported_bridge(netdev)) {
			nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
								   netdev, mod);
			return 0;
		}

		/* MAC is global but matches need to go to pre_tun table. */
		nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
	}

	if (!nfp_mac_idx) {
		/* Assign a global index if non-repr or MAC is now shared. */
		if (entry || !port) {
			ida_idx = ida_alloc_max(&priv->tun.mac_off_ids,
						NFP_MAX_MAC_INDEX, GFP_KERNEL);
			if (ida_idx < 0)
				return ida_idx;

			nfp_mac_idx =
				nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);

			if (nfp_flower_is_supported_bridge(netdev))
				nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;

		} else {
			nfp_mac_idx =
				nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		}
	}

	if (!entry) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			err = -ENOMEM;
			goto err_free_ida;
		}

		ether_addr_copy(entry->addr, netdev->dev_addr);
		INIT_LIST_HEAD(&entry->repr_list);

		if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
					   &entry->ht_node,
					   offloaded_macs_params)) {
			err = -ENOMEM;
			goto err_free_entry;
		}
	}

	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
				       nfp_mac_idx, false);
	if (err) {
		/* If not shared then free. */
		if (!entry->ref_count)
			goto err_remove_hash;
		goto err_free_ida;
	}

	entry->index = nfp_mac_idx;
	nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

	return 0;

err_remove_hash:
	rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
			       offloaded_macs_params);
err_free_entry:
	kfree(entry);
err_free_ida:
	if (ida_idx != -1)
		ida_free(&priv->tun.mac_off_ids, ida_idx);

	return err;
}

static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  const u8 *mac, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_tun_offloaded_mac *entry;
	struct nfp_repr *repr;
	u16 nfp_mac_idx;
	int ida_idx;

	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
	if (!entry)
		return 0;

	entry->ref_count--;
	/* If del is part of a mod then mac_list is still in use elsewhere. */
	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;
		list_del(&repr_priv->mac_list);
	}

	if (nfp_flower_is_supported_bridge(netdev)) {
		entry->bridge_count--;

		if (!entry->bridge_count && entry->ref_count) {
			nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
			if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
						     false)) {
				nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
						     netdev_name(netdev));
				return 0;
			}

			entry->index = nfp_mac_idx;
			return 0;
		}
	}

	/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
		int port, err;

		repr_priv = list_first_entry(&entry->repr_list,
					     struct nfp_flower_repr_priv,
					     mac_list);
		repr = repr_priv->nfp_repr;
		port = nfp_repr_get_port_id(repr->netdev);
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
		if (err) {
			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
					     netdev_name(netdev));
			return 0;
		}

		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_free(&priv->tun.mac_off_ids, ida_idx);
		entry->index = nfp_mac_idx;
		return 0;
	}

	if (entry->ref_count)
		return 0;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
					    &entry->ht_node,
					    offloaded_macs_params));

	if (nfp_flower_is_supported_bridge(netdev))
		nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
	else
		nfp_mac_idx = entry->index;

	/* If MAC has global ID then extract and free the ida entry. */
	if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_free(&priv->tun.mac_off_ids, ida_idx);
	}

	kfree(entry);

	return __nfp_tunnel_offload_mac(app, mac, 0, true);
}

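/* Dispatcher for MAC offload add/delete/modify. A modify on a MAC that
 * was never offloaded degrades to an add, and non-repr devices hold an
 * extra non_repr_priv reference for as long as their MAC is offloaded.
 */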
static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
		       enum nfp_flower_mac_offload_cmd cmd)
{
	struct nfp_flower_non_repr_priv *nr_priv = NULL;
	bool non_repr = false, *mac_offloaded;
	u8 *off_mac = NULL;
	int err, port = 0;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return 0;

		repr_priv = repr->app_priv;
		if (repr_priv->on_bridge)
			return 0;

		mac_offloaded = &repr_priv->mac_offloaded;
		off_mac = &repr_priv->offloaded_mac_addr[0];
		port = nfp_repr_get_port_id(netdev);
		if (!nfp_tunnel_port_is_phy_repr(port))
			return 0;
	} else if (nfp_fl_is_netdev_to_offload(netdev)) {
		nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
		if (!nr_priv)
			return -ENOMEM;

		mac_offloaded = &nr_priv->mac_offloaded;
		off_mac = &nr_priv->offloaded_mac_addr[0];
		non_repr = true;
	} else {
		return 0;
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

	switch (cmd) {
	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
		if (err)
			goto err_put_non_repr_priv;

		if (non_repr)
			__nfp_flower_non_repr_priv_get(nr_priv);

		*mac_offloaded = true;
		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
		/* Only attempt delete if add was successful. */
		if (!*mac_offloaded)
			break;

		if (non_repr)
			__nfp_flower_non_repr_priv_put(nr_priv);

		*mac_offloaded = false;

		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
						false);
		if (err)
			goto err_put_non_repr_priv;

		break;
	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
		/* Ignore if changing to the same address. */
		if (ether_addr_equal(netdev->dev_addr, off_mac))
			break;

		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
		if (err)
			goto err_put_non_repr_priv;

		/* Delete the previous MAC address. */
		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
					     netdev_name(netdev));

		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	default:
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return 0;

err_put_non_repr_priv:
	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return err;
}

int nfp_tunnel_mac_event_handler(struct nfp_app *app,
				 struct net_device *netdev,
				 unsigned long event, void *ptr)
{
	int err;

	if (event == NETDEV_DOWN) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_DEL);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_UP) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_ADD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEADDR) {
		/* Only offload addr change if netdev is already up. */
		if (!(netdev->flags & IFF_UP))
			return NOTIFY_OK;

		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEUPPER) {
		/* If a repr is attached to a bridge then tunnel packets
		 * entering the physical port are directed through the bridge
		 * datapath and cannot be directly detunneled. Therefore,
		 * associated offloaded MACs and indexes should not be used
		 * by fw for detunneling.
		 */
		struct netdev_notifier_changeupper_info *info = ptr;
		struct net_device *upper = info->upper_dev;
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		if (!nfp_netdev_is_nfp_repr(netdev) ||
		    !nfp_flower_is_supported_bridge(upper))
			return NOTIFY_OK;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return NOTIFY_OK;

		repr_priv = repr->app_priv;

		if (info->linking) {
			if (nfp_tunnel_offload_mac(app, netdev,
						   NFP_TUNNEL_MAC_OFFLOAD_DEL))
				nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
						     netdev_name(netdev));
			repr_priv->on_bridge = true;
		} else {
			repr_priv->on_bridge = false;

			if (!(netdev->flags & IFF_UP))
				return NOTIFY_OK;

			if (nfp_tunnel_offload_mac(app, netdev,
						   NFP_TUNNEL_MAC_OFFLOAD_ADD))
				nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
						     netdev_name(netdev));
		}
	}
	return NOTIFY_OK;
}

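/* Offload a rule that is matched before tunnel decap. The number of
 * such rules is bounded by NFP_TUN_PRE_TUN_RULE_LIMIT, and the MAC
 * index and VLAN are cached on the flow payload because the egress
 * device may no longer exist when the rule is deleted.
 */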
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
				 struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_offloaded_mac *mac_entry;
	struct nfp_flower_meta_tci *key_meta;
	struct nfp_tun_pre_tun_rule payload;
	struct net_device *internal_dev;
	int err;

	if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
		return -ENOSPC;

	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

	internal_dev = flow->pre_tun_rule.dev;
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.host_ctx_id = flow->meta.host_ctx_id;

	/* Lookup MAC index for the pre-tunnel rule egress device.
	 * Note that because the device is always an internal port, it will
	 * have a constant global index so does not need to be tracked.
	 */
	mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
						     internal_dev->dev_addr);
	if (!mac_entry)
		return -ENOENT;

	/* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
	 * set/clear for port_idx.
	 */
	key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
	if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
		mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
	else
		mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;

	payload.port_idx = cpu_to_be16(mac_entry->index);

	/* Copy mac id and vlan to flow - dev may not exist at delete time. */
	flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
	flow->pre_tun_rule.port_idx = payload.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(struct nfp_tun_pre_tun_rule),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	app_priv->pre_tun_rule_cnt++;

	return 0;
}

int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
				     struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_pre_tun_rule payload;
	u32 tmp_flags = 0;
	int err;

	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

	tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
	payload.flags = cpu_to_be32(tmp_flags);
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.port_idx = flow->pre_tun_rule.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(struct nfp_tun_pre_tun_rule),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	app_priv->pre_tun_rule_cnt--;

	return 0;
}

int nfp_tunnel_config_start(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	/* Initialise rhash for MAC offload tracking. */
	err = rhashtable_init(&priv->tun.offloaded_macs,
			      &offloaded_macs_params);
	if (err)
		return err;

	ida_init(&priv->tun.mac_off_ids);

	/* Initialise priv data for IPv4/v6 offloading. */
	mutex_init(&priv->tun.ipv4_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
	mutex_init(&priv->tun.ipv6_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv6_off_list);

	/* Initialise priv data for neighbour offloading. */
	priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

	err = register_netevent_notifier(&priv->tun.neigh_nb);
	if (err) {
		rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
					    nfp_check_rhashtable_empty, NULL);
		return err;
	}

	return 0;
}

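/* Teardown mirrors nfp_tunnel_config_start() and additionally flushes
 * the cached IPv4 tunnel endpoint list and any remaining neighbour
 * entries.
 */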
void nfp_tunnel_config_stop(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *ip_entry;
	struct list_head *ptr, *storage;

	unregister_netevent_notifier(&priv->tun.neigh_nb);

	ida_destroy(&priv->tun.mac_off_ids);

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	mutex_destroy(&priv->tun.ipv6_off_lock);

	/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
	rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
				    nfp_check_rhashtable_empty, NULL);

	nfp_tun_cleanup_nn_entries(app);
}