/*
 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
 * implementation
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <net/neighbour.h>
#include <net/switchdev.h>
#include <net/ip_fib.h>
#include <net/arp.h>

#include "rocker.h"
#include "rocker_tlv.h"
struct ofdpa_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 has_eth_dst;
			u8 has_eth_dst_mask;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct ofdpa_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};

struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,
};

#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
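
/* Untagged traffic on a port is mapped to a driver-internal VLAN taken from
 * a small pool (OFDPA_N_INTERNAL_VLANS IDs starting at
 * OFDPA_INTERNAL_VLAN_ID_BASE); the IDs are handed out per ifindex by
 * ofdpa_port_internal_vlan_id_get() further down in this file.
 */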
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;
};

struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;
	struct net_device *bridge_dev;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
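
/* Canned Ethernet addresses and masks used when building the control-traffic
 * entries below: ll_mac/ll_mask match 802 link-local multicast,
 * ipv4_mcast/ipv4_mask match 01:00:5e:00:00:00/25 (RFC 1112) and
 * ipv6_mcast/ipv6_mask match 33:33:00:00:00:00/16 (RFC 2464).
 */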
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
	u16 end = start + OFDPA_N_INTERNAL_VLANS;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
				     u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = ofdpa_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
				  __be16 vlan_id)
{
	if (ofdpa_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
				const char *kind)
{
	return ofdpa_port->bridge_dev &&
	       !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
}

static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}

static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}

#define OFDPA_OP_FLAG_REMOVE		BIT(0)
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)
#define OFDPA_OP_FLAG_LEARNED		BIT(2)
#define OFDPA_OP_FLAG_REFRESH		BIT(3)

static bool ofdpa_flags_nowait(int flags)
{
	return flags & OFDPA_OP_FLAG_NOWAIT;
}

/*************************************************************
 * Flow, group, FDB, internal VLAN and neigh command prepares
 *************************************************************/
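
/* Each prepare callback below serializes one table entry into the command
 * descriptor as nested Rocker TLVs; a rocker_tlv_put_*() call fails when the
 * descriptor runs out of room, which is reported as -EMSGSIZE.
 */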
static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			    const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
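
/* ofdpa_cmd_flow_tbl_add() above is the rocker_cmd_exec() prepare callback
 * for both flow add and modify: the common header (table id, priority,
 * hardtime, cookie) is emitted first and the per-table encoder is chosen
 * from entry->key.tbl_id.
 */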
static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int
ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				     struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
			       ROCKER_GROUP_PORT_GET(entry->group_id)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
			      entry->l2_interface.pop_vlan))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				  const struct ofdpa_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}

static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/
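
/* The driver keeps a software mirror of what has been programmed into the
 * device: flow entries hashed by CRC32 of their key, group entries hashed
 * by group id, plus FDB, internal VLAN and neighbour tables.  Each table is
 * protected by its own spinlock so it can be updated from atomic context.
 */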
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}

static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		match->cookie = found->cookie;
		hash_del(&found->entry);
		kfree(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_flow_tbl_add,
			       found, NULL, NULL);
}
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	kfree(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_flow_tbl_del,
				      found, NULL, NULL);
		kfree(found);
	}

	return err;
}

static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
			     struct ofdpa_flow_tbl_entry *entry)
{
	if (flags & OFDPA_OP_FLAG_REMOVE)
		return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
	else
		return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
}

static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
				  u32 in_pport, u32 in_pport_mask,
				  enum rocker_of_dpa_table_id goto_tbl)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
			       int flags,
			       u32 in_pport, __be16 vlan_id,
			       __be16 vlan_id_mask,
			       enum rocker_of_dpa_table_id goto_tbl,
			       bool untagged, __be16 new_vlan_id)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;

	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}

static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 int flags, const u8 *eth_dst,
				 const u8 *eth_dst_mask, __be16 vlan_id,
				 u32 tunnel_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
{
	struct ofdpa_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
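
/* Unicast IPv4 routes are keyed on everything up to (but not including)
 * ucast_routing.group_id, so a route update that only changes its nexthop
 * group hashes to the same flow entry and becomes a modify rather than a
 * second add; see the key_len = offsetof(...) assignment below.
 */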
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, struct fib_info *fi,
					 int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static struct ofdpa_group_tbl_entry *
ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
		     const struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa_group_tbl_entry *found;

	hash_for_each_possible(ofdpa->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}

static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
{
	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		kfree(entry->group_ids);
		break;
	default:
		break;
	}
	kfree(entry);
}

static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_group_tbl_add,
			       found, NULL, NULL);
}
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	ofdpa_group_tbl_entry_free(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_group_tbl_del,
				      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(found);
	}

	return err;
}

static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
			      struct ofdpa_group_tbl_entry *entry)
{
	if (flags & OFDPA_OP_FLAG_REMOVE)
		return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
	else
		return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
}

static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
				    int flags, __be16 vlan_id,
				    u32 out_pport, int pop_vlan)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
				  int flags, u8 group_count,
				  const u32 *group_ids, u32 group_id)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
	if (!entry->group_ids) {
		kfree(entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}

static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
				int flags, __be16 vlan_id,
				u8 group_count, const u32 *group_ids,
				u32 group_id)
{
	return ofdpa_group_l2_fan_out(ofdpa_port, flags,
				      group_count, group_ids,
				      group_id);
}
static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
				  u32 index, const u8 *src_mac, const u8 *dst_mac,
				  __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
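
/* An L3 unicast group rewrites the Ethernet header (source/destination MAC
 * and VLAN) and then chains to the L2 interface group for the egress pport,
 * which is why its group_id field is set to ROCKER_GROUP_L2_INTERFACE()
 * above.
 */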
static struct ofdpa_neigh_tbl_entry *
ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
{
	struct ofdpa_neigh_tbl_entry *found;

	hash_for_each_possible(ofdpa->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}

static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	entry->index = ofdpa->neigh_tbl_next_index++;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}

static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
{
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		kfree(entry);
	}
}

static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
			       const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else {
		entry->ref_count++;
	}
}
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	if (!adding)
		kfree(entry);

	return err;
}
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      int flags, __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (!adding)
		kfree(entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);

	return err;
}
static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
					 int port_index)
{
	struct rocker_port *rocker_port;

	rocker_port = ofdpa->rocker->ports[port_index];
	return rocker_port ? rocker_port->wpriv : NULL;
}

static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
				       int flags, __be16 vlan_id)
{
	struct ofdpa_port *p;
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (!p)
			continue;
		if (!ofdpa_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
				   group_count, group_ids, group_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	kfree(group_ids);
	return err;
}
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
static struct ofdpa_ctrl {
	const u8 *eth_dst;
	const u8 *eth_dst_mask;
	__be16 eth_type;
	bool acl;
	bool bridge;
	bool term;
	bool copy_to_cpu;
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
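
/* The ofdpa_ctrls[] templates above describe control traffic that must be
 * trapped or copied to the CPU.  Which of them are active on a port is
 * driven by its STP state (see ofdpa_port_stp_update()), and each active
 * template is instantiated per VLAN configured on the port.
 */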
static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
				    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = ofdpa_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
				 in_pport, in_pport_mask,
				 eth_src, eth_src_mask,
				 ctrl->eth_dst, ctrl->eth_dst_mask,
				 ctrl->eth_type,
				 vlan_id, vlan_id_mask,
				 ip_proto, ip_proto_mask,
				 ip_tos, ip_tos_mask,
				 group_id);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}

static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
				       int flags, const struct ofdpa_ctrl *ctrl,
				       __be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
				    ctrl->eth_dst, ctrl->eth_dst_mask,
				    vlan_id, tunnel_id,
				    goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}

static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
				     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
				      ctrl->eth_type, ctrl->eth_dst,
				      ctrl->eth_dst_mask, vlan_id,
				      vlan_id_mask, ctrl->copy_to_cpu,
				      flags);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}
static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
				const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
						ctrl, vlan_id);
	if (ctrl->bridge)
		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
						   ctrl, vlan_id);

	if (ctrl->term)
		return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
						 ctrl, vlan_id);

	return -EOPNOTSUPP;
}

static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
				    __be16 vlan_id)
{
	int err = 0;
	int i;

	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (ofdpa_port->ctrls[i]) {
			err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
						   &ofdpa_ctrls[i], vlan_id);
			if (err)
				return err;
		}
	}

	return err;
}

static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
			   const struct ofdpa_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
					   ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
			   u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_vlan_add;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_vlan_l2_groups;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_flood_group;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

	return 0;

err_vlan_add:
err_vlan_l2_groups:
err_flood_group:
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
	return err;
}
static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
				     in_pport, in_pport_mask,
				     goto_tbl);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);

	return err;
}

struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;
	int flags;
	u8 addr[ETH_ALEN];
	u16 vid;
};
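
/* FDB learn/forget events can arrive in atomic context, so notifying the
 * bridge layer is deferred to process context through this work item; the
 * switchdev notifiers are then raised from ofdpa_port_fdb_learn_work().
 */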
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
{
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info);
	rtnl_unlock();

	kfree(work);
}

static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				int flags, const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;
	int err;

	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	schedule_work(&lw->work);

	return 0;
}
static struct ofdpa_fdb_tbl_entry *
ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
		   const struct ofdpa_fdb_tbl_entry *match)
{
	struct ofdpa_fdb_tbl_entry *found;

	hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
			return found;

	return NULL;
}

static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			kfree(fdb);
			hash_del(&found->entry);
			kfree(found);
		}
	} else if (!removing) {
		hash_add(ofdpa->fdb_tbl, &fdb->entry,
			 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		kfree(fdb);
		if (!found && removing)
			return 0; /* already removed */
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
}
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}

static void ofdpa_fdb_cleanup(unsigned long data)
{
	struct ofdpa *ofdpa = (struct ofdpa *)data;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			ofdpa_port_fdb_learn(ofdpa_port, flags,
					     entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
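
/* Ageing: the cleanup timer walks the learned FDB entries, removes those
 * whose ageing time has expired, and re-arms itself for the next earliest
 * expiry (or a full ageing interval if nothing is pending).
 */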
static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
				 int flags, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
				      in_pport_mask, eth_type,
				      ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
				      in_pport_mask, eth_type,
				      ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);

	return err;
}

static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		flags |= OFDPA_OP_FLAG_REMOVE;

	out_pport = ofdpa_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	prev_state = ofdpa_port->stp_state;

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_port_ctrl;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, flags);
	if (err)
		goto err_fdb_flush;

	err = ofdpa_port_fwding(ofdpa_port, flags);
	if (err)
		goto err_port_fwding;

	return 0;

err_port_ctrl:
err_fdb_flush:
err_port_fwding:
	memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = prev_state;
	return err;
}
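
/* STP state drives everything per port: which control templates are wanted,
 * whether learned FDB entries are flushed, and whether the L2 interface
 * groups that enable forwarding exist at all.
 */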
static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
{
	if (ofdpa_port_is_bridged(ofdpa_port))
		/* bridge STP will enable port */
		return 0;

	/* port is not bridged, so simulate going to FORWARDING state */
	return ofdpa_port_stp_update(ofdpa_port, flags,
				     BR_STATE_FORWARDING);
}

static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
{
	if (ofdpa_port_is_bridged(ofdpa_port))
		/* bridge STP will disable port */
		return 0;

	/* port is not bridged, so simulate going to DISABLED state */
	return ofdpa_port_stp_update(ofdpa_port, flags,
				     BR_STATE_DISABLED);
}

static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
			       u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = ofdpa_port_vlan(ofdpa_port, 0, vid);
	if (err)
		return err;

	err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
	if (err)
		ofdpa_port_vlan(ofdpa_port,
				OFDPA_OP_FLAG_REMOVE, vid);

	return err;
}

static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
			       u16 vid, u16 flags)
{
	int err;

	err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
				    htons(vid));
	if (err)
		return err;

	return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
			       vid);
}

static struct ofdpa_internal_vlan_tbl_entry *
ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
{
	struct ofdpa_internal_vlan_tbl_entry *found;

	hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
			       entry, ifindex) {
		if (found->ifindex == ifindex)
			return found;
	}

	return NULL;
}
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
			       int dst_len, struct fib_info *fi, u32 tb_id,
			       int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == ofdpa_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
					 nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, fi, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
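
/* A route whose nexthop gateway is reachable through this port gets an L3
 * unicast nexthop group (resolving the neighbour on demand); any other
 * route is pointed at the CPU-facing L2 interface group so the kernel
 * forwards it in software.
 */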
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}
/**********************************
 * Rocker world ops implementation
 **********************************/
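/* ofdpa_init()/ofdpa_fini() run once per rocker device.  The world private
 * areas reached through rocker->wpriv and rocker_port->wpriv are presumably
 * allocated by the core rocker driver according to the priv_size and
 * port_priv_size advertised in rocker_ofdpa_ops at the bottom of this file.
 */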
static int ofdpa_init(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	ofdpa->rocker = rocker;

	hash_init(ofdpa->flow_tbl);
	spin_lock_init(&ofdpa->flow_tbl_lock);

	hash_init(ofdpa->group_tbl);
	spin_lock_init(&ofdpa->group_tbl_lock);

	hash_init(ofdpa->fdb_tbl);
	spin_lock_init(&ofdpa->fdb_tbl_lock);

	hash_init(ofdpa->internal_vlan_tbl);
	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);

	hash_init(ofdpa->neigh_tbl);
	spin_lock_init(&ofdpa->neigh_tbl_lock);

	setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
		    (unsigned long) ofdpa);
	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);

	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;

	return 0;
}
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	del_timer_sync(&ofdpa->fdb_cleanup_timer);
	flush_workqueue(rocker->rocker_owq);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
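/* Per-port bring-up happens in two steps: port_pre_init() only wires up
 * back-pointers and defaults, while port_init() programs the ingress port
 * table, allocates the port's internal VLAN and installs the untagged
 * (vid 0) VLAN, unwinding the ingress table entry on failure.
 */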
static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
	ofdpa_port->rocker_port = rocker_port;
	ofdpa_port->dev = rocker_port->dev;
	ofdpa_port->pport = rocker_port->pport;
	ofdpa_port->brport_flags = BR_LEARNING;
	ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
	return 0;
}
static int ofdpa_port_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err;

	rocker_port_set_learning(rocker_port,
				 !!(ofdpa_port->brport_flags & BR_LEARNING));

	err = ofdpa_port_ig_tbl(ofdpa_port, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
		return err;
	}

	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
	return err;
}
static void ofdpa_port_fini(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
}
static int ofdpa_port_open(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_fwd_enable(ofdpa_port, 0);
}
static void ofdpa_port_stop(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
}
static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
					 u8 state)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_stp_update(ofdpa_port, 0, state);
}
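/* The bridge-flags and ageing-time setters below still follow the switchdev
 * prepare/commit transaction model: during the prepare phase
 * (switchdev_trans_ph_prepare()) no hardware or timer state is changed and
 * any provisionally stored flags are rolled back.
 */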
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags,
					    struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	if (switchdev_trans_ph_prepare(trans))
		ofdpa_port->brport_flags = orig_flags;

	return err;
}
static int
ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
				 unsigned long *p_brport_flags)
{
	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	*p_brport_flags = ofdpa_port->brport_flags;

	return 0;
}
static int
ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
					 rocker_port,
					 unsigned long *
					 p_brport_flags_support)
{
	/* Learning is the only bridge port flag this world offloads */
	*p_brport_flags_support = BR_LEARNING;

	return 0;
}
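/* A port's ageing time can only shorten the device-wide FDB cleanup
 * interval: ofdpa->ageing_time tracks the minimum across ports and the
 * cleanup timer is kicked so the new value takes effect immediately.
 */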
static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
				       u32 ageing_time,
				       struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;

	if (!switchdev_trans_ph_prepare(trans)) {
		ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
		if (ofdpa_port->ageing_time < ofdpa->ageing_time)
			ofdpa->ageing_time = ofdpa_port->ageing_time;
		mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
	}

	return 0;
}
static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}
static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
				  u16 vid, const unsigned char *addr)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return -EINVAL;

	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
}
static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
				  u16 vid, const unsigned char *addr)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
	int flags = OFDPA_OP_FLAG_REMOVE;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return -EINVAL;

	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
}
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
}
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
				  struct net_device *master)
{
	int err;

	ofdpa_port->bridge_dev = master;

	err = ofdpa_port_fwd_disable(ofdpa_port, 0);
	if (err)
		return err;
	err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
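/* master_linked/unlinked are called by the core rocker driver when the port
 * gains or loses an upper device (presumably driven by netdevice notifier
 * events in rocker_main.c).  Only bridge and Open vSwitch masters get
 * special treatment here.
 */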
static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
				    struct net_device *master)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err = 0;

	if (netif_is_bridge_master(master))
		err = ofdpa_port_bridge_join(ofdpa_port, master);
	else if (netif_is_ovs_master(master))
		err = ofdpa_port_ovs_changed(ofdpa_port, master);
	return err;
}
static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
				      struct net_device *master)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err = 0;

	if (ofdpa_port_is_bridged(ofdpa_port))
		err = ofdpa_port_bridge_leave(ofdpa_port);
	else if (ofdpa_port_is_ovsed(ofdpa_port))
		err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
	return err;
}
static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
				   struct neighbour *n)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
						    OFDPA_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *) n->primary_key;

	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
}
static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
				    struct neighbour *n)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *) n->primary_key;

	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
}
static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
				       const unsigned char *addr,
				       __be16 vlan_id)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
}
static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
						    struct rocker *rocker)
{
	struct rocker_port *rocker_port;

	rocker_port = rocker_port_dev_lower_find(dev, rocker);
	return rocker_port ? rocker_port->wpriv : NULL;
}
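/* FIB offload: routes are mirrored to hardware until the first failure.
 * On failure the core driver is expected to call fib4_abort(), which
 * flushes every offloaded unicast-routing flow entry, drops the offload
 * refcounts and sets fib_aborted so further routing stays in the kernel's
 * software path.
 */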
static int ofdpa_fib4_add(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	int err;

	if (ofdpa->fib_aborted)
		return 0;
	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
	if (!ofdpa_port)
		return 0;
	err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				  fen_info->dst_len, fen_info->fi,
				  fen_info->tb_id, 0);
	if (err)
		return err;
	fib_info_offload_inc(fen_info->fi);
	return 0;
}
static int ofdpa_fib4_del(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;

	if (ofdpa->fib_aborted)
		return 0;
	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
	if (!ofdpa_port)
		return 0;
	fib_info_offload_dec(fen_info->fi);
	return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				   fen_info->dst_len, fen_info->fi,
				   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
}
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	int bkt;

	if (ofdpa->fib_aborted)
		return;

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
						       rocker);
		if (!ofdpa_port)
			continue;
		fib_info_offload_dec(flow_entry->fi);
		ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	ofdpa->fib_aborted = true;
}
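/* Entry points of the OF-DPA world.  The core rocker driver presumably
 * selects this world based on the .mode field below and dispatches its
 * switchdev, FDB, neighbour and FIB events through these callbacks.
 */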
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
	.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};