// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <net/neighbour.h>
#include <net/switchdev.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>

#include "rocker_tlv.h"
struct ofdpa_flow_tbl_key {
	enum rocker_of_dpa_table_id tbl_id;
	enum rocker_of_dpa_table_id goto_tbl;		/* ig_port */
	enum rocker_of_dpa_table_id goto_tbl;		/* vlan */
	u8 eth_dst_mask[ETH_ALEN];			/* term_mac */
	enum rocker_of_dpa_table_id goto_tbl;		/* term_mac */
	enum rocker_of_dpa_table_id goto_tbl;		/* ucast_routing */
	u8 eth_dst_mask[ETH_ALEN];			/* bridge */
	enum rocker_of_dpa_table_id goto_tbl;		/* bridge */
	u8 eth_src_mask[ETH_ALEN];			/* acl */
	u8 eth_dst_mask[ETH_ALEN];			/* acl */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	struct ofdpa_flow_tbl_key key;
	u32 key_crc32; /* key */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 group_id; /* key */
	u8 eth_src[ETH_ALEN];		/* l2_rewrite */
	u8 eth_dst[ETH_ALEN];		/* l2_rewrite */
	u8 eth_src[ETH_ALEN];		/* l3_unicast */
	u8 eth_dst[ETH_ALEN];		/* l3_unicast */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	unsigned long touched;
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */

struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u8 eth_dst[ETH_ALEN];
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,

#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
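
/* Per-switch OF-DPA state (struct ofdpa): shadow flow, group, FDB,
 * internal-VLAN and neighbour hash tables, each guarded by its own
 * spinlock, plus the FDB cleanup timer and the bridge ageing time.
 */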
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;

/* Per-port OF-DPA state (struct ofdpa_port). */
	struct rocker_port *rocker_port;
	struct net_device *dev;
	struct net_device *bridge_dev;
	__be16 internal_vlan_id;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
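
/* Well-known MACs and masks used by the control-traffic table
 * (ofdpa_ctrls[]) and the router MAC entries below: link-local
 * multicast, the IPv4/IPv6 multicast prefixes and their match masks.
 */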
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
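
/* Untagged traffic (VID 0) is carried on a per-port "internal" VLAN ID
 * taken from the OFDPA_INTERNAL_VLAN_ID_BASE range; the helper below
 * maps a VID to the VLAN ID actually programmed and reports via
 * pop_vlan whether the tag should be popped on egress.
 */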
static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
				     u16 vid, bool *pop_vlan)
	vlan_id = htons(vid);
	vlan_id = ofdpa_port->internal_vlan_id;

static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
	if (ofdpa_vlan_id_is_internal(vlan_id))
	return ntohs(vlan_id);

static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
	return ofdpa_port->bridge_dev &&
	       !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);

static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
	return ofdpa_port_is_slave(ofdpa_port, "bridge");

static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
#define OFDPA_OP_FLAG_REMOVE		BIT(0)
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)
#define OFDPA_OP_FLAG_LEARNED		BIT(2)
#define OFDPA_OP_FLAG_REFRESH		BIT(3)

static bool ofdpa_flags_nowait(int flags)
	return flags & OFDPA_OP_FLAG_NOWAIT;

/*************************************************************
 * Flow, group, FDB, internal VLAN and neigh command prepares
 *************************************************************/
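
/* Each ofdpa_cmd_*() helper below fills a rocker descriptor with the
 * ROCKER_TLV_OF_DPA_* attributes describing one flow or group table
 * entry; a non-zero return from a rocker_tlv_put_*() call is treated
 * as a failure to build the message.
 */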
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, entry->key.ig_port.in_pport))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, entry->key.ig_port.in_pport_mask))
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, entry->key.ig_port.goto_tbl))

ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			    const struct ofdpa_flow_tbl_entry *entry)
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, entry->key.vlan.in_pport))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, entry->key.vlan.vlan_id))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, entry->key.vlan.vlan_id_mask))
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, entry->key.vlan.goto_tbl))
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, entry->key.vlan.new_vlan_id))
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, entry->key.term_mac.in_pport))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, entry->key.term_mac.in_pport_mask))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, entry->key.term_mac.eth_type))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, ETH_ALEN, entry->key.term_mac.eth_dst))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, ETH_ALEN, entry->key.term_mac.eth_dst_mask))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, entry->key.term_mac.vlan_id))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, entry->key.term_mac.vlan_id_mask))
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, entry->key.term_mac.goto_tbl))
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, entry->key.term_mac.copy_to_cpu))

ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, entry->key.ucast_routing.eth_type))
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, entry->key.ucast_routing.dst4))
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, entry->key.ucast_routing.dst4_mask))
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, entry->key.ucast_routing.goto_tbl))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, entry->key.ucast_routing.group_id))
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, ETH_ALEN, entry->key.bridge.eth_dst))
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, ETH_ALEN, entry->key.bridge.eth_dst_mask))
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, entry->key.bridge.vlan_id))
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, entry->key.bridge.tunnel_id))
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, entry->key.bridge.goto_tbl))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, entry->key.bridge.group_id))
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, entry->key.bridge.copy_to_cpu))

ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, entry->key.acl.in_pport))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, entry->key.acl.in_pport_mask))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, ETH_ALEN, entry->key.acl.eth_src))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, ETH_ALEN, entry->key.acl.eth_src_mask))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, ETH_ALEN, entry->key.acl.eth_dst))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, ETH_ALEN, entry->key.acl.eth_dst_mask))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, entry->key.acl.eth_type))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, entry->key.acl.vlan_id))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, entry->key.acl.vlan_id_mask))

	switch (ntohs(entry->key.acl.eth_type)) {
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, entry->key.acl.ip_proto))
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO_MASK, entry->key.acl.ip_proto_mask))
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, entry->key.acl.ip_tos & 0x3f))
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP_MASK, entry->key.acl.ip_tos_mask & 0x3f))
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, (entry->key.acl.ip_tos & 0xc0) >> 6))
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN_MASK, (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, entry->key.acl.group_id))
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY, entry->key.priority))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
	}

	rocker_tlv_nest_end(desc_info, cmd_info);

static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
	rocker_tlv_nest_end(desc_info, cmd_info);
ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				     struct ofdpa_group_tbl_entry *entry)
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, ROCKER_GROUP_PORT_GET(entry->group_id)))
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, entry->l2_interface.pop_vlan))

ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, entry->l2_rewrite.group_id))
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, ETH_ALEN, entry->l2_rewrite.eth_src))
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, ETH_ALEN, entry->l2_rewrite.eth_dst))
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, entry->l2_rewrite.vlan_id))

ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				  const struct ofdpa_group_tbl_entry *entry)
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
	group_ids = rocker_tlv_nest_start(desc_info, ROCKER_TLV_OF_DPA_GROUP_IDS);

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))

	rocker_tlv_nest_end(desc_info, group_ids);

ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, ETH_ALEN, entry->l3_unicast.eth_src))
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, ETH_ALEN, entry->l3_unicast.eth_dst))
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, entry->l3_unicast.vlan_id))
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, entry->l3_unicast.ttl_check))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, entry->l3_unicast.group_id))
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
	}

	rocker_tlv_nest_end(desc_info, cmd_info);

static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
	const struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
	rocker_tlv_nest_end(desc_info, cmd_info);

/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/
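
/* Software shadow tables: flow entries hash on a CRC32 of their key,
 * group entries on their group ID, FDB entries on a CRC32 of
 * {port, MAC, VLAN}.  Adds look up an existing entry first so the
 * hardware command can be issued as an ADD or a MOD accordingly.
 */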
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found, entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
	}

static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);
	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);
	found = ofdpa_flow_tbl_find(ofdpa, match);
	match->cookie = found->cookie;
	hash_del(&found->entry);
	found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	found->cookie = ofdpa->flow_tbl_next_cookie++;
	found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_flow_tbl_add,
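
/* ofdpa_flow_tbl_add() above reuses the cookie of an existing entry
 * with the same key and issues OF_DPA_FLOW_MOD; otherwise it takes the
 * next cookie and issues OF_DPA_FLOW_ADD.  The command is pushed to
 * hardware through rocker_cmd_exec(), optionally without waiting for
 * completion (OFDPA_OP_FLAG_NOWAIT).
 */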
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);
	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);
	found = ofdpa_flow_tbl_find(ofdpa, match);
	hash_del(&found->entry);
	found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	err = rocker_cmd_exec(ofdpa_port->rocker_port,
			      ofdpa_flags_nowait(flags),
			      ofdpa_cmd_flow_tbl_del,

static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
			     struct ofdpa_flow_tbl_entry *entry)
	if (flags & OFDPA_OP_FLAG_REMOVE)
		return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);

	return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);

static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
				  u32 in_pport, u32 in_pport_mask,
				  enum rocker_of_dpa_table_id goto_tbl)
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
			       u32 in_pport, __be16 vlan_id,
			       enum rocker_of_dpa_table_id goto_tbl,
			       bool untagged, __be16 new_vlan_id)
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	entry->key.priority = OFDPA_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;
	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);

static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
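
/* Termination MAC entries steer routed frames: a multicast destination
 * MAC continues to the multicast routing table, a unicast one to the
 * unicast routing table.
 */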
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 int flags, const u8 *eth_dst,
				 const u8 *eth_dst_mask, __be16 vlan_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
	struct ofdpa_flow_tbl_entry *entry;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
	entry->key.bridge.has_eth_dst = 1;
	ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	entry->key.bridge.has_eth_dst_mask = 1;
	ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
	if (!ether_addr_equal(eth_dst_mask, ff_mac))

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);

static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, struct fib_info *fi,
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;
	ether_addr_copy(entry->key.acl.eth_src, eth_src);
	ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
static struct ofdpa_group_tbl_entry *
ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
		     const struct ofdpa_group_tbl_entry *match)
	struct ofdpa_group_tbl_entry *found;

	hash_for_each_possible(ofdpa->group_tbl, found, entry, match->group_id) {
		if (found->group_id == match->group_id)
	}

static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		kfree(entry->group_ids);
	}

static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);
	found = ofdpa_group_tbl_find(ofdpa, match);
	hash_del(&found->entry);
	ofdpa_group_tbl_entry_free(found);
	found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	hash_add(ofdpa->group_tbl, &found->entry, found->group_id);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_group_tbl_add,

static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);
	found = ofdpa_group_tbl_find(ofdpa, match);
	hash_del(&found->entry);
	found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);
	ofdpa_group_tbl_entry_free(match);

	err = rocker_cmd_exec(ofdpa_port->rocker_port,
			      ofdpa_flags_nowait(flags),
			      ofdpa_cmd_group_tbl_del,
	ofdpa_group_tbl_entry_free(found);
static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
			      struct ofdpa_group_tbl_entry *entry)
	if (flags & OFDPA_OP_FLAG_REMOVE)
		return ofdpa_group_tbl_del(ofdpa_port, flags, entry);

	return ofdpa_group_tbl_add(ofdpa_port, flags, entry);

static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
				    int flags, __be16 vlan_id,
				    u32 out_pport, int pop_vlan)
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);

static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
				  int flags, u8 group_count,
				  const u32 *group_ids, u32 group_id)
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	entry->group_id = group_id;
	entry->group_count = group_count;
	entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
	if (!entry->group_ids) {
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);

static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
				int flags, __be16 vlan_id,
				u8 group_count, const u32 *group_ids,
	return ofdpa_group_l2_fan_out(ofdpa_port, flags,
				      group_count, group_ids,

static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
				  u32 index, const u8 *src_mac, const u8 *dst_mac,
				  __be16 vlan_id, bool ttl_check, u32 pport)
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
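
/* The neigh table maps an IPv4 next-hop address to the index used to
 * build its L3 unicast group.  ofdpa_neigh_add() hands out the next
 * free index and ofdpa_neigh_del() removes the entry once its
 * reference count drops to zero.
 */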
static struct ofdpa_neigh_tbl_entry *
ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
	struct ofdpa_neigh_tbl_entry *found;

	hash_for_each_possible(ofdpa->neigh_tbl, found, entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)

static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct ofdpa_neigh_tbl_entry *entry)
	entry->index = ofdpa->neigh_tbl_next_index++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));

static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
	}

static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
			       const u8 *eth_dst, bool ttl_check)
	ether_addr_copy(entry->eth_dst, eth_dst);
	entry->ttl_check = ttl_check;
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, flags,
				     ofdpa_port->dev->dev_addr,
				     ofdpa_port->internal_vlan_id,
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
						    group_id, NULL, flags);
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);

	n = neigh_create(&arp_tbl, &ip_addr, dev);

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */
	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
		neigh_event_send(n, NULL);

static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      int flags, __be32 ip_addr, u32 *index)
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool resolved = true;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, entry);
		*index = entry->index;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* Resolved means neigh ip_addr is resolved to neigh mac. */
	err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);
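
/* ofdpa_port_ipv4_nh() above returns the neigh index used to form a
 * ROCKER_GROUP_L3_UNICAST group ID for a route's nexthop; if the
 * nexthop MAC is still unknown it starts ARP resolution via
 * ofdpa_port_ipv4_resolve().
 */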
static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
	struct rocker_port *rocker_port;

	rocker_port = ofdpa->rocker->ports[port_index];
	return rocker_port ? rocker_port->wpriv : NULL;

static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
				       int flags, __be16 vlan_id)
	struct ofdpa_port *p;
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);

	group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */
	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (!ofdpa_port_is_bridged(p))
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
				   group_count, group_ids, group_id);
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
				     __be16 vlan_id, bool pop_vlan)
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */
	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */
	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
	}

	if ((!adding || ref != 1) && (adding || ref != 0))

	err = ofdpa_group_l2_interface(ofdpa_port, flags,
				       vlan_id, out_pport, pop_vlan);
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
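
/* ofdpa_ctrls[] describes the classes of control traffic a port may
 * need to trap or flood: link-local multicast, local ARP, IPv4/IPv6
 * multicast, default bridging and the OVS default.  Each entry gives
 * the DMAC/ethertype match and whether the frame is copied to the CPU.
 */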
static struct ofdpa_ctrl {
	const u8 *eth_dst_mask;
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst_mask = ll_mask,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
	},
};
static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
				    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
	u32 in_pport = ofdpa_port->pport;
	u32 in_pport_mask = 0xffffffff;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
				 in_pport, in_pport_mask,
				 eth_src, eth_src_mask,
				 ctrl->eth_dst, ctrl->eth_dst_mask,
				 vlan_id, vlan_id_mask,
				 ip_proto, ip_proto_mask,
				 ip_tos, ip_tos_mask,
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);

static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
				       int flags, const struct ofdpa_ctrl *ctrl,
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);

	if (!ofdpa_port_is_bridged(ofdpa_port))

	err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
				    ctrl->eth_dst, ctrl->eth_dst_mask,
				    goto_tbl, group_id, ctrl->copy_to_cpu);
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);

static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
				     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
				      ctrl->eth_type, ctrl->eth_dst,
				      ctrl->eth_dst_mask, vlan_id,
				      vlan_id_mask, ctrl->copy_to_cpu,
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);

static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
				const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
		return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (ofdpa_port->ctrls[i]) {
			err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
						   &ofdpa_ctrls[i], vlan_id);
		}
	}

static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
			   const struct ofdpa_ctrl *ctrl)
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
		err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
	}
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	    !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
		netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
					internal_vlan_id, untagged);
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_vlan_l2_groups;

	err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_flood_group;

	err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
	enum rocker_of_dpa_table_id goto_tbl;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
				     in_pport, in_pport_mask,
		netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
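
/* Learned and forgotten FDB addresses are reported to the bridge via
 * switchdev SWITCHDEV_FDB_ADD/DEL_TO_BRIDGE notifiers from a deferred
 * work item (ofdpa_port_fdb_learn_work below) rather than from the
 * caller's context.
 */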
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;

static void ofdpa_port_fdb_learn_work(struct work_struct *work)
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;

	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info, NULL);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info, NULL);

static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				int flags, const u8 *addr, __be16 vlan_id)
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;

	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
	}

	if (!ofdpa_port_is_bridged(ofdpa_port))

	lw = kzalloc(sizeof(*lw), GFP_ATOMIC);

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);
	lw->ofdpa_port = ofdpa_port;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	schedule_work(&lw->work);
static struct ofdpa_fdb_tbl_entry *
ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
		   const struct ofdpa_fdb_tbl_entry *match)
	struct ofdpa_fdb_tbl_entry *found;

	hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)

static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
	found = ofdpa_fdb_tbl_find(ofdpa, fdb);
	found->touched = jiffies;
	hash_del(&found->entry);
	} else if (!removing) {
		hash_add(ofdpa->fdb_tbl, &fdb->entry,
	}
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		if (!found && removing)
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)

	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
		if (!found->learned)
		err = ofdpa_port_fdb_learn(ofdpa_port, flags,
					   found->key.vlan_id);
		hash_del(&found->entry);
	}
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
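
/* ofdpa_fdb_cleanup() runs off the fdb_cleanup_timer: learned entries
 * whose last-touched time plus the port's ageing time has passed are
 * removed (via ofdpa_port_fdb_learn() with the REMOVE and LEARNED
 * flags) and the timer is re-armed for the next expiry.
 */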
static void ofdpa_fdb_cleanup(struct timer_list *t)
	struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			ofdpa_port_fdb_learn(ofdpa_port, flags,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
				 int flags, __be16 vlan_id)
	u32 in_pport_mask = 0xffffffff;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
				      in_pport_mask, eth_type,
				      ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);

	eth_type = htons(ETH_P_IPV6);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
				      in_pport_mask, eth_type,
				      ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);

static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		flags |= OFDPA_OP_FLAG_REMOVE;

	out_pport = ofdpa_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
		vlan_id = htons(vid);
		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
	}
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 int flags, u8 state)
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];

	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	prev_state = ofdpa_port->stp_state;
	if (ofdpa_port->stp_state == state)
	ofdpa_port->stp_state = state;

	case BR_STATE_DISABLED:
		/* port is completely disabled */
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		want[OFDPA_CTRL_LOCAL_ARP] = true;

	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, flags);
	err = ofdpa_port_fwding(ofdpa_port, flags);
		goto err_port_fwding;

	memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = prev_state;

static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
	if (ofdpa_port_is_bridged(ofdpa_port))
		/* bridge STP will enable port */

	/* port is not bridged, so simulate going to FORWARDING state */
	return ofdpa_port_stp_update(ofdpa_port, flags,
				     BR_STATE_FORWARDING);

static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
	if (ofdpa_port_is_bridged(ofdpa_port))
		/* bridge STP will disable port */

	/* port is not bridged, so simulate going to DISABLED state */
	return ofdpa_port_stp_update(ofdpa_port, flags,
static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
	/* XXX deal with flags for PVID and untagged */

	err = ofdpa_port_vlan(ofdpa_port, 0, vid);
	err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
		ofdpa_port_vlan(ofdpa_port,
				OFDPA_OP_FLAG_REMOVE, vid);

static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
	err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
	return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,

static struct ofdpa_internal_vlan_tbl_entry *
ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
	struct ofdpa_internal_vlan_tbl_entry *found;

	hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
		if (found->ifindex == ifindex)

static __be16
ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);
	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
	}
	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
2267 static int ofdpa_port_fib_ipv4(struct ofdpa_port
*ofdpa_port
, __be32 dst
,
2268 int dst_len
, struct fib_info
*fi
, u32 tb_id
,
2271 const struct fib_nh
*nh
;
2272 __be16 eth_type
= htons(ETH_P_IP
);
2273 __be32 dst_mask
= inet_make_mask(dst_len
);
2274 __be16 internal_vlan_id
= ofdpa_port
->internal_vlan_id
;
2275 u32 priority
= fi
->fib_priority
;
2276 enum rocker_of_dpa_table_id goto_tbl
=
2277 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY
;
2284 /* XXX support ECMP */
2286 nh
= fib_info_nh(fi
, 0);
2287 nh_on_port
= (nh
->fib_nh_dev
== ofdpa_port
->dev
);
2288 has_gw
= !!nh
->fib_nh_gw4
;
2290 if (has_gw
&& nh_on_port
) {
2291 err
= ofdpa_port_ipv4_nh(ofdpa_port
, flags
,
2292 nh
->fib_nh_gw4
, &index
);
2296 group_id
= ROCKER_GROUP_L3_UNICAST(index
);
2298 /* Send to CPU for processing */
2299 group_id
= ROCKER_GROUP_L2_INTERFACE(internal_vlan_id
, 0);
2302 err
= ofdpa_flow_tbl_ucast4_routing(ofdpa_port
, eth_type
, dst
,
2303 dst_mask
, priority
, goto_tbl
,
2304 group_id
, fi
, flags
);
2306 netdev_err(ofdpa_port
->dev
, "Error (%d) IPv4 route %pI4\n",
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}

/**********************************
 * Rocker world ops implementation
 **********************************/

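/* World init: every OF-DPA table is a hash table guarded by its own
 * spinlock.  The FDB cleanup timer is armed immediately and the ageing
 * time defaults to BR_DEFAULT_AGEING_TIME.
 */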
static int ofdpa_init(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	ofdpa->rocker = rocker;

	hash_init(ofdpa->flow_tbl);
	spin_lock_init(&ofdpa->flow_tbl_lock);

	hash_init(ofdpa->group_tbl);
	spin_lock_init(&ofdpa->group_tbl_lock);

	hash_init(ofdpa->fdb_tbl);
	spin_lock_init(&ofdpa->fdb_tbl_lock);

	hash_init(ofdpa->internal_vlan_tbl);
	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);

	hash_init(ofdpa->neigh_tbl);
	spin_lock_init(&ofdpa->neigh_tbl_lock);

	timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);

	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;

	return 0;
}

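/* World teardown: stop the FDB cleanup timer and flush the rocker
 * workqueue first, so nothing is still queued when the tables are
 * emptied under their locks.
 */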
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	del_timer_sync(&ofdpa->fdb_cleanup_timer);
	flush_workqueue(rocker->rocker_owq);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}

static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
	ofdpa_port->rocker_port = rocker_port;
	ofdpa_port->dev = rocker_port->dev;
	ofdpa_port->pport = rocker_port->pport;
	ofdpa_port->brport_flags = BR_LEARNING;
	ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
	return 0;
}

static int ofdpa_port_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err;

	rocker_port_set_learning(rocker_port,
				 !!(ofdpa_port->brport_flags & BR_LEARNING));

	err = ofdpa_port_ig_tbl(ofdpa_port, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
		return err;
	}

	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
	return err;
}

static void ofdpa_port_fini(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
}

static int ofdpa_port_open(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_fwd_enable(ofdpa_port, 0);
}

static void ofdpa_port_stop(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
}

static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
					 u8 state)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_stp_update(ofdpa_port, 0, state);
}

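/* Bridge flag changes arrive through the two-phase switchdev transaction:
 * in the prepare phase the new flags are only checked and then restored,
 * and the hardware learning bit is written only in the commit phase.
 * BR_LEARNING is the single bridge flag this world supports.
 */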
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags,
					    struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	if (switchdev_trans_ph_prepare(trans))
		ofdpa_port->brport_flags = orig_flags;

	return err;
}

static int
ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
					 rocker_port,
					 unsigned long *
					 p_brport_flags_support)
{
	*p_brport_flags_support = BR_LEARNING;

	return 0;
}

static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
				       u32 ageing_time,
				       struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;

	if (!switchdev_trans_ph_prepare(trans)) {
		ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
		if (ofdpa_port->ageing_time < ofdpa->ageing_time)
			ofdpa->ageing_time = ofdpa_port->ageing_time;
		mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
	}

	return 0;
}

static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
				  u16 vid, const unsigned char *addr)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return -EINVAL;

	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
}

static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
				  u16 vid, const unsigned char *addr)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
	int flags = OFDPA_OP_FLAG_REMOVE;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return -EINVAL;

	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
}

static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
}

static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}

static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
				  struct net_device *master)
{
	int err;

	ofdpa_port->bridge_dev = master;

	err = ofdpa_port_fwd_disable(ofdpa_port, 0);
	if (err)
		return err;
	err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}

static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
				    struct net_device *master)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err = 0;

	if (netif_is_bridge_master(master))
		err = ofdpa_port_bridge_join(ofdpa_port, master);
	else if (netif_is_ovs_master(master))
		err = ofdpa_port_ovs_changed(ofdpa_port, master);
	return err;
}

static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
				      struct net_device *master)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err = 0;

	if (ofdpa_port_is_bridged(ofdpa_port))
		err = ofdpa_port_bridge_leave(ofdpa_port);
	else if (ofdpa_port_is_ovsed(ofdpa_port))
		err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
	return err;
}

static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
				   struct neighbour *n)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
		    OFDPA_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *) n->primary_key;

	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
}

static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
				    struct neighbour *n)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *) n->primary_key;

	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
}

static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
				       const unsigned char *addr,
				       __be16 vlan_id)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
}

static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
						    struct rocker *rocker)
{
	struct rocker_port *rocker_port;

	rocker_port = rocker_port_dev_lower_find(dev, rocker);
	return rocker_port ? rocker_port->wpriv : NULL;
}

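/* FIB notifier glue: a route is offloaded only while the abort flag is
 * clear and its first nexthop egresses a rocker port.  RTNH_F_OFFLOAD on
 * the nexthop tracks whether the hardware currently holds the route.
 */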
static int ofdpa_fib4_add(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct fib_nh *nh;
	int err;

	if (ofdpa->fib_aborted)
		return 0;
	nh = fib_info_nh(fen_info->fi, 0);
	ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
	if (!ofdpa_port)
		return 0;
	err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				  fen_info->dst_len, fen_info->fi,
				  fen_info->tb_id, 0);
	if (err)
		return err;
	nh->fib_nh_flags |= RTNH_F_OFFLOAD;
	return 0;
}

static int ofdpa_fib4_del(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct fib_nh *nh;

	if (ofdpa->fib_aborted)
		return 0;
	nh = fib_info_nh(fen_info->fi, 0);
	ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
	if (!ofdpa_port)
		return 0;
	nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				   fen_info->dst_len, fen_info->fi,
				   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
}

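/* Abort FIB offload: walk the flow table, remove every unicast-routing
 * entry and clear RTNH_F_OFFLOAD on its nexthop, then mark the world
 * aborted so later fib4_add/fib4_del notifications become no-ops.
 */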
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	struct fib_nh *nh;
	int bkt;

	if (ofdpa->fib_aborted)
		return;

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		nh = fib_info_nh(flow_entry->fi, 0);
		ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
		if (!ofdpa_port)
			continue;
		nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
		ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	ofdpa->fib_aborted = true;
}

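/* Wire the OF-DPA world implementation into the rocker core. */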
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};