/*
 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
 *
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <net/neighbour.h>
#include <net/switchdev.h>
#include <net/ip_fib.h>

#include "rocker_tlv.h"
struct ofdpa_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			enum rocker_of_dpa_table_id goto_tbl;
		} vlan;
		struct {
			u8 eth_dst_mask[ETH_ALEN];
			enum rocker_of_dpa_table_id goto_tbl;
		} term_mac;
		struct {
			enum rocker_of_dpa_table_id goto_tbl;
		} ucast_routing;
		struct {
			u8 eth_dst_mask[ETH_ALEN];
			enum rocker_of_dpa_table_id goto_tbl;
		} bridge;
		struct {
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
		} acl;
	};
};
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct ofdpa_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 group_id; /* key */
	union {
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
		} l3_unicast;
	};
};
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	__be16 vlan_id;
};
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,
};

#define OFDPA_INTERNAL_VLAN_ID_BASE 0x0f00
#define OFDPA_N_INTERNAL_VLANS 255
#define OFDPA_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;
};
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;
	struct net_device *bridge_dev;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */
enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
	u16 end = start + OFDPA_N_INTERNAL_VLANS;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
				     u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = ofdpa_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
				  __be16 vlan_id)
{
	if (ofdpa_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
				const char *kind)
{
	return ofdpa_port->bridge_dev &&
	       !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
}

static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}

static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}
#define OFDPA_OP_FLAG_REMOVE	BIT(0)
#define OFDPA_OP_FLAG_NOWAIT	BIT(1)
#define OFDPA_OP_FLAG_LEARNED	BIT(2)
#define OFDPA_OP_FLAG_REFRESH	BIT(3)

static bool ofdpa_flags_nowait(int flags)
{
	return flags & OFDPA_OP_FLAG_NOWAIT;
}
static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
			       size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction.  If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	return elem ? elem + 1 : NULL;
}

static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
			   size_t size)
{
	return __ofdpa_mem_alloc(trans, flags, size);
}

static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
			   size_t n, size_t size)
{
	return __ofdpa_mem_alloc(trans, flags, n * size);
}

static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}
/*************************************************************
 * Flow, group, FDB, internal VLAN and neigh command prepares
 *************************************************************/

static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			    const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
static int
ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				     struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
			       ROCKER_GROUP_PORT_GET(entry->group_id)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
			      entry->l2_interface.pop_vlan))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				  const struct ofdpa_group_tbl_entry *entry)
{
	struct rocker_tlv *group_ids;
	int i;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}

static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/
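/* Flow table entries are hashed on a CRC32 of the (possibly truncated)
 * match key; the memcmp() in the lookup below resolves CRC collisions.
 */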
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
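/* Add or update a flow table entry.  If a matching entry already
 * exists, its cookie is reused and the command becomes a FLOW_MOD;
 * otherwise a new cookie is assigned and a FLOW_ADD is issued.
 */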
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_flow_tbl_add,
				       found, NULL, NULL);
	return 0;
}
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	ofdpa_kfree(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_flow_tbl_del,
					      found, NULL, NULL);
		ofdpa_kfree(trans, found);
	}

	return err;
}
static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
			     struct switchdev_trans *trans, int flags,
			     struct ofdpa_flow_tbl_entry *entry)
{
	if (flags & OFDPA_OP_FLAG_REMOVE)
		return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
	else
		return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
}

static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
				  struct switchdev_trans *trans, int flags,
				  u32 in_pport, u32 in_pport_mask,
				  enum rocker_of_dpa_table_id goto_tbl)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       u32 in_pport, __be16 vlan_id,
			       __be16 vlan_id_mask,
			       enum rocker_of_dpa_table_id goto_tbl,
			       bool untagged, __be16 new_vlan_id)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;
	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans, int flags,
				 const u8 *eth_dst, const u8 *eth_dst_mask,
				 __be16 vlan_id, u32 tunnel_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
{
	struct ofdpa_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 struct switchdev_trans *trans,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
static struct ofdpa_group_tbl_entry *
ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
		     const struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa_group_tbl_entry *found;

	hash_for_each_possible(ofdpa->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}

static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
				       struct ofdpa_group_tbl_entry *entry)
{
	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		ofdpa_kfree(trans, entry->group_ids);
		break;
	default:
		break;
	}
	ofdpa_kfree(trans, entry);
}
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_group_tbl_add,
				       found, NULL, NULL);
	return 0;
}
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	ofdpa_group_tbl_entry_free(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_group_tbl_del,
					      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(trans, found);
	}

	return err;
}
static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_group_tbl_entry *entry)
{
	if (flags & OFDPA_OP_FLAG_REMOVE)
		return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
	else
		return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
}

static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
				    struct switchdev_trans *trans, int flags,
				    __be16 vlan_id, u32 out_pport,
				    bool pop_vlan)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
}
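/* Fan-out (flood/mcast) groups carry their own copy of the group ID
 * array; it is freed together with the entry in
 * ofdpa_group_tbl_entry_free().
 */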
static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
				  struct switchdev_trans *trans,
				  int flags, u8 group_count,
				  const u32 *group_ids, u32 group_id)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = ofdpa_kcalloc(trans, flags,
					 group_count, sizeof(u32));
	if (!entry->group_ids) {
		ofdpa_kfree(trans, entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
}
static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags,
				__be16 vlan_id, u8 group_count,
				const u32 *group_ids, u32 group_id)
{
	return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
				      group_count, group_ids,
				      group_id);
}

static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
				  struct switchdev_trans *trans, int flags,
				  u32 index, const u8 *src_mac, const u8 *dst_mac,
				  __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
}
static struct ofdpa_neigh_tbl_entry *
ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
{
	struct ofdpa_neigh_tbl_entry *found;

	hash_for_each_possible(ofdpa->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}

static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct switchdev_trans *trans,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	if (!switchdev_trans_ph_commit(trans))
		entry->index = ofdpa->neigh_tbl_next_index++;
	if (switchdev_trans_ph_prepare(trans))
		return;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}

static void ofdpa_neigh_del(struct switchdev_trans *trans,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	if (switchdev_trans_ph_prepare(trans))
		return;
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		ofdpa_kfree(trans, entry);
	}
}

static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
			       struct switchdev_trans *trans,
			       const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else if (!switchdev_trans_ph_prepare(trans)) {
		entry->ref_count++;
	}
}
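/* Install or remove the L3 unicast group and /32 route for an IPv4
 * neighbour.  The neigh table lookup decides whether this is an add,
 * an update or a removal.
 */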
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, trans, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		ofdpa_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	if (!adding)
		ofdpa_kfree(trans, entry);

	return err;
}
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
	if (found)
		*index = found->index;

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, trans, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		ofdpa_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (!adding)
		ofdpa_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);

	return err;
}
static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
					 int port_index)
{
	struct rocker_port *rocker_port;

	rocker_port = ofdpa->rocker->ports[port_index];
	return rocker_port ? rocker_port->wpriv : NULL;
}
static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
				       struct switchdev_trans *trans,
				       int flags, __be16 vlan_id)
{
	struct ofdpa_port *p;
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32));
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (!p)
			continue;
		if (!ofdpa_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id,
				   group_count, group_ids, group_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	ofdpa_kfree(trans, group_ids);
	return err;
}
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
static struct ofdpa_ctrl {
	const u8 *eth_dst;
	const u8 *eth_dst_mask;
	__be16 eth_type;
	bool acl;
	bool bridge;
	bool term;
	bool copy_to_cpu;
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
				    struct switchdev_trans *trans, int flags,
				    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = ofdpa_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags,
				 in_pport, in_pport_mask,
				 eth_src, eth_src_mask,
				 ctrl->eth_dst, ctrl->eth_dst_mask,
				 ctrl->eth_type,
				 vlan_id, vlan_id_mask,
				 ip_proto, ip_proto_mask,
				 ip_tos, ip_tos_mask,
				 group_id);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}

static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
				       struct switchdev_trans *trans,
				       int flags,
				       const struct ofdpa_ctrl *ctrl,
				       __be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags,
				    ctrl->eth_dst, ctrl->eth_dst_mask,
				    vlan_id, tunnel_id,
				    goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}

static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
				     struct switchdev_trans *trans, int flags,
				     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
				      ofdpa_port->pport, in_pport_mask,
				      ctrl->eth_type, ctrl->eth_dst,
				      ctrl->eth_dst_mask, vlan_id,
				      vlan_id_mask, ctrl->copy_to_cpu,
				      flags);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}
static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags,
				const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
						ctrl, vlan_id);
	if (ctrl->bridge)
		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
						   ctrl, vlan_id);

	if (ctrl->term)
		return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
						 ctrl, vlan_id);

	return -EOPNOTSUPP;
}

static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
				    struct switchdev_trans *trans, int flags,
				    __be16 vlan_id)
{
	int err = 0;
	int i;

	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (ofdpa_port->ctrls[i]) {
			err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
						   &ofdpa_ctrls[i], vlan_id);
			if (err)
				return err;
		}
	}

	return err;
}

static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
			   struct switchdev_trans *trans, int flags,
			   const struct ofdpa_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
					   ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}
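/* Add or remove a port VLAN: update the per-port VLAN bitmap, the
 * control ACLs, the L2 interface and flood groups, and the VLAN
 * table entry for this pport.
 */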
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
			   struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

err_out:
	if (switchdev_trans_ph_prepare(trans))
		change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	return err;
}
static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
			     struct switchdev_trans *trans, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags,
				     in_pport, in_pport_mask,
				     goto_tbl);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);

	return err;
}

struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;
	struct switchdev_trans *trans;
	int flags;
	u8 addr[ETH_ALEN];
	u16 vid;
};
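/* Deferred work: notify the bridge layer of learned/forgotten FDB
 * entries via switchdev notifiers.
 */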
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
{
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->ofdpa_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->ofdpa_port->dev, &info.info);

	ofdpa_kfree(lw->trans, work);
}
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags,
				const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	lw = ofdpa_kzalloc(trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	if (switchdev_trans_ph_prepare(trans))
		ofdpa_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}
static struct ofdpa_fdb_tbl_entry *
ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
		   const struct ofdpa_fdb_tbl_entry *match)
{
	struct ofdpa_fdb_tbl_entry *found;

	hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
			return found;

	return NULL;
}
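/* Add, refresh or remove a software FDB entry and push the matching
 * bridging flow to the device via ofdpa_port_fdb_learn().
 */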
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  struct switchdev_trans *trans,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			ofdpa_kfree(trans, fdb);
			if (!switchdev_trans_ph_prepare(trans))
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_add(ofdpa->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		ofdpa_kfree(trans, fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id);
}
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
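/* FDB ageing timer: expire learned entries whose ageing time has
 * passed and re-arm the timer for the next soonest expiry.
 */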
static void ofdpa_fdb_cleanup(unsigned long data)
{
	struct ofdpa *ofdpa = (struct ofdpa *)data;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			ofdpa_port_fdb_learn(ofdpa_port, NULL,
					     flags, entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans, int flags,
				 __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
				      ofdpa_port->pport, in_pport_mask,
				      eth_type, ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
				      ofdpa_port->pport, in_pport_mask,
				      eth_type, ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);

	return err;
}
static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
			     struct switchdev_trans *trans, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		flags |= OFDPA_OP_FLAG_REMOVE;

	out_pport = ofdpa_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
		err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 uninitialized_var(prev_state);
	int err;
	int i;

	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
		prev_state = ofdpa_port->stp_state;
	}

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_out;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags);
	if (err)
		goto err_out;

	err = ofdpa_port_fwding(ofdpa_port, trans, flags);

err_out:
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		ofdpa_port->stp_state = prev_state;
	}

	return err;
}
static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
{
	if (ofdpa_port_is_bridged(ofdpa_port))
		/* bridge STP will enable port */
		return 0;

	/* port is not bridged, so simulate going to FORWARDING state */
	return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
				     BR_STATE_FORWARDING);
}

static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
{
	if (ofdpa_port_is_bridged(ofdpa_port))
		/* bridge STP will disable port */
		return 0;

	/* port is not bridged, so simulate going to DISABLED state */
	return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
				     BR_STATE_DISABLED);
}

static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans,
			       u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid);
	if (err)
		return err;

	err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid));
	if (err)
		ofdpa_port_vlan(ofdpa_port, trans,
				OFDPA_OP_FLAG_REMOVE, vid);

	return err;
}

static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
			       u16 vid, u16 flags)
{
	int err;

	err = ofdpa_port_router_mac(ofdpa_port, NULL,
				    OFDPA_OP_FLAG_REMOVE, htons(vid));
	if (err)
		return err;

	return ofdpa_port_vlan(ofdpa_port, NULL,
			       OFDPA_OP_FLAG_REMOVE, vid);
}

static struct ofdpa_internal_vlan_tbl_entry *
ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
{
	struct ofdpa_internal_vlan_tbl_entry *found;

	hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
			       entry, ifindex) {
		if (found->ifindex == ifindex)
			return found;
	}

	return NULL;
}

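/* Internal VLAN IDs are handed out from a bitmap of
 * OFDPA_N_INTERNAL_VLANS IDs starting at OFDPA_INTERNAL_VLAN_ID_BASE
 * (bit 0 maps to VLAN 0x0f00, bit 1 to 0x0f01, and so on) and are
 * refcounted per ifindex: the first get for an ifindex allocates a
 * fresh ID, later gets share it, and the last put returns the ID to
 * the bitmap.
 */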
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}

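/* IPv4 routes are programmed into the unicast routing table with a
 * goto of the ACL policy table.  When the route's nexthop has a
 * gateway reachable via this port, traffic is sent to an L3 unicast
 * group for that neighbour; otherwise it is sent to the port's
 * internal-VLAN L2 interface group (pport 0), which in effect hands
 * the packet to the CPU for software forwarding.  Only a single
 * nexthop is handled; ECMP is not supported (see the XXX below).
 */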
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, __be32 dst,
			       int dst_len, const struct fib_info *fi,
			       u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == ofdpa_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags,
					 nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}

static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}

/**********************************
 * Rocker world ops implementation
 **********************************/

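/* ofdpa_init()/ofdpa_fini() run once per rocker switch: they set up the
 * flow, group, FDB, internal-VLAN and neighbour hash tables and their
 * locks, start the FDB cleanup timer, and on teardown stop the timer
 * and empty the tables.
 */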
static int ofdpa_init(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	ofdpa->rocker = rocker;

	hash_init(ofdpa->flow_tbl);
	spin_lock_init(&ofdpa->flow_tbl_lock);

	hash_init(ofdpa->group_tbl);
	spin_lock_init(&ofdpa->group_tbl_lock);

	hash_init(ofdpa->fdb_tbl);
	spin_lock_init(&ofdpa->fdb_tbl_lock);

	hash_init(ofdpa->internal_vlan_tbl);
	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);

	hash_init(ofdpa->neigh_tbl);
	spin_lock_init(&ofdpa->neigh_tbl_lock);

	setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
		    (unsigned long) ofdpa);
	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);

	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;

	return 0;
}

static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	del_timer_sync(&ofdpa->fdb_cleanup_timer);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}

static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
	ofdpa_port->rocker_port = rocker_port;
	ofdpa_port->dev = rocker_port->dev;
	ofdpa_port->pport = rocker_port->pport;
	ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
	return 0;
}

static int ofdpa_port_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err;

	switchdev_port_fwd_mark_set(ofdpa_port->dev, NULL, false);
	rocker_port_set_learning(rocker_port,
				 !!(ofdpa_port->brport_flags & BR_LEARNING));

	err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
		return err;
	}

	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
	return err;
}

static void ofdpa_port_fini(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
}

static int ofdpa_port_open(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_fwd_enable(ofdpa_port, 0);
}

static void ofdpa_port_stop(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
}

static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
					 u8 state,
					 struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
}

static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags,
					    struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	if (switchdev_trans_ph_prepare(trans))
		ofdpa_port->brport_flags = orig_flags;

	return err;
}

static int
ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
				 unsigned long *p_brport_flags)
{
	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	*p_brport_flags = ofdpa_port->brport_flags;
	return 0;
}

static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
				       u32 ageing_time,
				       struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;

	if (!switchdev_trans_ph_prepare(trans)) {
		ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
		if (ofdpa_port->ageing_time < ofdpa->ageing_time)
			ofdpa->ageing_time = ofdpa_port->ageing_time;
		mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
	}

	return 0;
}

static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
				    struct switchdev_obj_port_vlan *vlan,
				    switchdev_obj_dump_cb_t *cb)
{
	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		vlan->flags = 0;
		if (ofdpa_vlan_id_is_internal(htons(vid)))
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}

static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port,
				   const struct switchdev_obj_ipv4_fib *fib4,
				   struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_fib_ipv4(ofdpa_port, trans,
				   htonl(fib4->dst), fib4->dst_len,
				   fib4->fi, fib4->tb_id, 0);
}

static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
				   const struct switchdev_obj_ipv4_fib *fib4)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_fib_ipv4(ofdpa_port, NULL,
				   htonl(fib4->dst), fib4->dst_len,
				   fib4->fi, fib4->tb_id,
				   OFDPA_OP_FLAG_REMOVE);
}

static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
				  const struct switchdev_obj_port_fdb *fdb,
				  struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return -EINVAL;

	return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0);
}

static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
				  const struct switchdev_obj_port_fdb *fdb)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
	int flags = OFDPA_OP_FLAG_REMOVE;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return -EINVAL;

	return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags);
}

static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port,
				   struct switchdev_obj_port_fdb *fdb,
				   switchdev_obj_dump_cb_t *cb)
{
	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		ether_addr_copy(fdb->addr, found->key.addr);
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port,
						  found->key.vlan_id);
		err = cb(&fdb->obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}

static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(ofdpa_port->dev, bridge, true);

	return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
}

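/* Leaving the bridge reverses the join above: drop the untagged VLAN,
 * swap the bridge's internal VLAN back for one keyed on the port's own
 * ifindex, clear the fwd mark and bridge_dev, re-add the untagged VLAN
 * and, if the port is administratively up, re-enable forwarding.
 */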
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	switchdev_port_fwd_mark_set(ofdpa_port->dev, ofdpa_port->bridge_dev,
				    false);
	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}

static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
				  struct net_device *master)
{
	int err;

	ofdpa_port->bridge_dev = master;

	err = ofdpa_port_fwd_disable(ofdpa_port, 0);
	if (err)
		return err;
	err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}

static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
				    struct net_device *master)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err = 0;

	if (netif_is_bridge_master(master))
		err = ofdpa_port_bridge_join(ofdpa_port, master);
	else if (netif_is_ovs_master(master))
		err = ofdpa_port_ovs_changed(ofdpa_port, master);
	return err;
}

static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
				      struct net_device *master)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err = 0;

	if (ofdpa_port_is_bridged(ofdpa_port))
		err = ofdpa_port_bridge_leave(ofdpa_port);
	else if (ofdpa_port_is_ovsed(ofdpa_port))
		err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
	return err;
}

static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
				   struct neighbour *n)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
		    OFDPA_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *) n->primary_key;

	return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
}

static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
				    struct neighbour *n)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *) n->primary_key;

	return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
}

static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
				       const unsigned char *addr,
				       __be16 vlan_id)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
}

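/* rocker_ofdpa_ops describes the OF-DPA world to the rocker core: the
 * world's mode (ROCKER_PORT_MODE_OF_DPA), the per-switch and per-port
 * private data sizes, and the init, port lifecycle, switchdev attr/obj
 * and neighbour callbacks implemented above.
 */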
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
	.port_obj_fib4_add = ofdpa_port_obj_fib4_add,
	.port_obj_fib4_del = ofdpa_port_obj_fib4_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
};