/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker.h"
static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};
struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 has_eth_dst;
			u8 has_eth_dst_mask;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};
struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};
struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};
struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};
struct rocker_desc_info {
	char *data; /* mapped */
	size_t data_size;
	size_t tlv_size;
	struct rocker_desc *desc;
	DEFINE_DMA_UNMAP_ADDR(mapaddr);
};
struct rocker_dma_ring_info {
	size_t size;
	u32 head;
	u32 tail;
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;
	struct rocker_desc_info *desc_info;
	unsigned int type;
};
enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,
	ROCKER_CTRL_LOCAL_ARP,
	ROCKER_CTRL_IPV4_MCAST,
	ROCKER_CTRL_IPV6_MCAST,
	ROCKER_CTRL_DFLT_BRIDGING,
	ROCKER_CTRL_DFLT_OVS,
	ROCKER_CTRL_MAX,
};
#define ROCKER_INTERNAL_VLAN_ID_BASE	0x0f00
#define ROCKER_N_INTERNAL_VLANS		255
#define ROCKER_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
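/* Example: with the values above, internal VLAN IDs occupy the top of the
 * 4096-entry VLAN space starting at 0x0f00 (3840); at most 255 of them can
 * be handed out, tracked by a bitmap of ROCKER_N_INTERNAL_VLANS bits.
 */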
struct rocker_port {
	struct net_device *dev;
	struct net_device *bridge_dev;
	struct rocker *rocker;
	unsigned int port_number;
	u32 pport;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[ROCKER_CTRL_MAX];
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
};
struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;
	} hw;
	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
};
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */
enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};
static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = start + ROCKER_N_INTERNAL_VLANS;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}
static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}
static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}
static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_ovs_master(rocker_port->bridge_dev);
}
#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)
static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction.  If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	return elem ? elem + 1 : NULL;
}
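/* A minimal usage sketch with helpers from this file: memory obtained via
 * rocker_port_kzalloc() during the switchdev prepare phase is handed back,
 * not re-allocated, when the identical code path re-runs in the commit
 * phase:
 *
 *	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
 *	if (!entry)
 *		return -ENOMEM;
 *	...
 *	rocker_port_kfree(trans, entry);  (a no-op while in prepare phase)
 */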
static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
}

static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 size_t n, size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
}
static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}
struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}
static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
					      struct switchdev_trans *trans,
					      int flags)
{
	struct rocker_wait *wait;

	wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
	if (!wait)
		return NULL;
	rocker_wait_init(wait);
	return wait;
}

static void rocker_wait_destroy(struct switchdev_trans *trans,
				struct rocker_wait *wait)
{
	rocker_port_kfree(trans, wait);
}
static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}
static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}
#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
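/* These helpers expand by token-pasting the register name, e.g.
 * rocker_write32(rocker, TEST_REG, val) becomes
 * writel(val, rocker->hw_addr + ROCKER_TEST_REG), so callers name
 * registers without their ROCKER_ prefix.
 */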
/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}
static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}
static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}
static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}
static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}
/******
 * TLV
 ******/

#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |             Header          | Pad |           Payload           | Pad |
 * |      (struct rocker_tlv)    | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */
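/* Worked example, assuming sizeof(struct rocker_tlv) == 8 so that
 * ROCKER_TLV_HDRLEN == 8: for a 6-byte payload (e.g. a MAC address),
 * rocker_tlv_attr_size(6) == 14, rocker_tlv_total_size(6) ==
 * ROCKER_TLV_ALIGN(14) == 16, and rocker_tlv_padlen(6) == 2.  tlv->len
 * stores the unpadded 14; the pad bytes only affect where the next
 * attribute starts.
 */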
static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
					  int *remaining)
{
	int totlen = ROCKER_TLV_ALIGN(tlv->len);

	*remaining -= totlen;
	return (struct rocker_tlv *) ((char *) tlv + totlen);
}

static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
{
	return remaining >= (int) ROCKER_TLV_HDRLEN &&
	       tlv->len >= ROCKER_TLV_HDRLEN &&
	       tlv->len <= remaining;
}

#define rocker_tlv_for_each(pos, head, len, rem)	\
	for (pos = head, rem = len;			\
	     rocker_tlv_ok(pos, rem);			\
	     pos = rocker_tlv_next(pos, &(rem)))

#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
			    rocker_tlv_len(tlv), rem)
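/* A minimal iteration sketch: walk every attribute in a received buffer.
 * handle() is a placeholder; rocker_tlv_parse() below is the in-file user.
 *
 *	const struct rocker_tlv *pos;
 *	int rem;
 *
 *	rocker_tlv_for_each(pos, head, buf_len, rem)
 *		handle(rocker_tlv_type(pos), rocker_tlv_data(pos),
 *		       rocker_tlv_len(pos));
 */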
static int rocker_tlv_attr_size(int payload)
{
	return ROCKER_TLV_HDRLEN + payload;
}

static int rocker_tlv_total_size(int payload)
{
	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

static int rocker_tlv_padlen(int payload)
{
	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}

static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
	return tlv->type;
}

static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
	return (char *) tlv + ROCKER_TLV_HDRLEN;
}

static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
	return tlv->len - ROCKER_TLV_HDRLEN;
}
static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
{
	return *(u8 *) rocker_tlv_data(tlv);
}

static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
{
	return *(u16 *) rocker_tlv_data(tlv);
}

static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
{
	return *(__be16 *) rocker_tlv_data(tlv);
}

static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
{
	return *(u32 *) rocker_tlv_data(tlv);
}

static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
{
	return *(u64 *) rocker_tlv_data(tlv);
}
static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
			     const char *buf, int buf_len)
{
	const struct rocker_tlv *tlv;
	const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
	int rem;

	memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));

	rocker_tlv_for_each(tlv, head, buf_len, rem) {
		u32 type = rocker_tlv_type(tlv);

		if (type > 0 && type <= maxtype)
			tb[type] = tlv;
	}
}

static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
				    const struct rocker_tlv *tlv)
{
	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
			 rocker_tlv_len(tlv));
}

static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
				  const struct rocker_desc_info *desc_info)
{
	rocker_tlv_parse(tb, maxtype, desc_info->data,
			 desc_info->desc->tlv_size);
}
static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
{
	return (struct rocker_tlv *) ((char *) desc_info->data +
				      desc_info->tlv_size);
}

static int rocker_tlv_put(struct rocker_desc_info *desc_info,
			  int attrtype, int attrlen, const void *data)
{
	int tail_room = desc_info->data_size - desc_info->tlv_size;
	int total_size = rocker_tlv_total_size(attrlen);
	struct rocker_tlv *tlv;

	if (unlikely(tail_room < total_size))
		return -EMSGSIZE;

	tlv = rocker_tlv_start(desc_info);
	desc_info->tlv_size += total_size;
	tlv->type = attrtype;
	tlv->len = rocker_tlv_attr_size(attrlen);
	memcpy(rocker_tlv_data(tlv), data, attrlen);
	memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
	return 0;
}
static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
			     int attrtype, u8 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
}

static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
			      int attrtype, u16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
}

static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
			       int attrtype, __be16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
}

static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
			      int attrtype, u32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
}

static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
			       int attrtype, __be32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
}

static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
			      int attrtype, u64 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
}
static struct rocker_tlv *
rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
{
	struct rocker_tlv *start = rocker_tlv_start(desc_info);

	if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
		return NULL;

	return start;
}

static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
				struct rocker_tlv *start)
{
	start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}

static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
				   const struct rocker_tlv *start)
{
	desc_info->tlv_size = (const char *) start - desc_info->data;
}
/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}
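/* Ring positions wrap around: with limit == 4, __pos_inc() maps
 * 0 -> 1, 1 -> 2, 2 -> 3 and 3 -> 0.
 */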
static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}
static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}
*
940 rocker_desc_head_get(const struct rocker_dma_ring_info
*info
)
942 static struct rocker_desc_info
*desc_info
;
943 u32 head
= __pos_inc(info
->head
, info
->size
);
945 desc_info
= &info
->desc_info
[info
->head
];
946 if (head
== info
->tail
)
947 return NULL
; /* ring full */
948 desc_info
->tlv_size
= 0;
static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}
static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}
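/* Ownership protocol: the driver owns descriptors from tail up to (but not
 * including) head, and the device owns the rest.  rocker_desc_head_get()
 * plus rocker_desc_head_set() hand a descriptor to the device, and the
 * completion generation bit checked by rocker_desc_gen() tells
 * rocker_desc_tail_get() when the device has finished with one.
 */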
static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
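/* Example: a requested size of 100 is rounded up to the power of two 128
 * and then clamped to [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX], so the
 * hardware only ever sees power-of-two ring sizes within its limits.
 */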
static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->type = type;
	info->size = size;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}
static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}
static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}
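/* Only size - 1 descriptors are handed over: one slot is always kept
 * empty so head == tail can unambiguously mean "ring empty" while
 * __pos_inc(head) == tail means "ring full" (see rocker_desc_head_get()).
 */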
static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}
static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}
static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}
static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}
static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
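/* For the default MTU of 1500 this is 1500 + 14 (Ethernet header) +
 * 4 (FCS) + 4 (one VLAN tag) = 1522 bytes per receive buffer.
 */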
static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}
static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
					 const struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}
static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}
static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}
static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}
static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}
static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
static void rocker_port_set_enable(const struct rocker_port *rocker_port,
				   bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}
/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
			rocker_wait_destroy(NULL, wait);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}
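/* Commands issued with ROCKER_OP_FLAG_NOWAIT have no sleeper: their wait
 * object is torn down right here in the handler, while waited-on commands
 * are completed by waking the sleeper in rocker_cmd_exec().
 */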
static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}
static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}
static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}
static int rocker_event_process(const struct rocker *rocker,
				const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	const struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}
static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/
typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv);

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
				    const struct rocker_desc_info *desc_info,
				    void *priv);

static int rocker_cmd_exec(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans, int flags,
			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
			   rocker_cmd_proc_cb_t process, void *process_priv)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
	unsigned long lock_flags;
	int err;

	wait = rocker_wait_create(rocker_port, trans, flags);
	if (!wait)
		return -ENOMEM;
	wait->nowait = nowait;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		err = -EAGAIN;
		goto out;
	}

	err = prepare(rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		goto out;
	}

	rocker_desc_cookie_ptr_set(desc_info, wait);

	if (!switchdev_trans_ph_prepare(trans))
		rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	if (nowait)
		return 0;

	if (!switchdev_trans_ph_prepare(trans))
		if (!rocker_wait_event_timeout(wait, HZ / 10))
			return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
out:
	rocker_wait_destroy(trans, wait);
	return err;
}
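/* All command-ring traffic funnels through rocker_cmd_exec(): the prepare
 * callback serializes the request TLVs into the descriptor, and the
 * optional process callback parses the completed descriptor, e.g.
 * rocker_cmd_get_port_settings_prep() paired with
 * rocker_cmd_get_port_settings_ethtool_proc() below.
 */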
static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}
static int
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}
struct port_name {
	char *buf;
	size_t len;
};

static int
rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
					    const struct rocker_desc_info *desc_info,
					    void *priv)
{
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct port_name *name = priv;
	const struct rocker_tlv *attr;
	size_t i, j, len;
	const char *str;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
	if (!attr)
		return -EIO;

	len = min_t(size_t, rocker_tlv_len(attr), name->len);
	str = rocker_tlv_data(attr);

	/* make sure name only contains alphanumeric characters */
	for (i = j = 0; i < len; ++i) {
		if (isalnum(str[i])) {
			name->buf[j] = str[i];
			j++;
		}
	}

	if (j == 0)
		return -EIO;

	name->buf[j] = '\0';

	return 0;
}
static int
rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
			       ethtool_cmd_speed(ecmd)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
			      ecmd->duplex))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
			      ecmd->autoneg))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
static int
rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	const unsigned char *macaddr = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
			   ETH_ALEN, macaddr))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
static int
rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      void *priv)
{
	int mtu = *(int *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
			       mtu))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
			      !!(rocker_port->brport_flags & BR_LEARNING)))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_ethtool_proc,
			       ecmd);
}

static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_macaddr_proc,
			       macaddr);
}

static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_ethtool_prep,
			       ecmd, NULL, NULL);
}

static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_macaddr_prep,
			       macaddr, NULL, NULL);
}

static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
					    int mtu)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_mtu_prep,
			       &mtu, NULL, NULL);
}

static int rocker_port_set_learning(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans)
{
	return rocker_cmd_exec(rocker_port, trans, 0,
			       rocker_cmd_set_port_learning_prep,
			       NULL, NULL, NULL);
}
static int
rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
				const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}
static int
rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			     const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}
static int
rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				 const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				      const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}
static int
rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			       const struct rocker_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
static int
rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			    const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	/* The IP TOS byte is split across two TLVs: the low six bits carry
	 * the DSCP field and the top two bits carry the ECN field.
	 */
	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
static int
rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				      struct rocker_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
			       ROCKER_GROUP_PORT_GET(entry->group_id)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
			      entry->l2_interface.pop_vlan))
		return -EMSGSIZE;

	return 0;
}
static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				   const struct rocker_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}

static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}

static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	const struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/
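
/* The driver keeps a software shadow of each hardware table (flow,
 * group, FDB, internal VLAN and neigh).  Lookups and refcounting are
 * done against the shadow under a per-table spinlock; the resulting
 * add/mod/del is then pushed to the device via rocker_cmd_exec().
 */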

static int rocker_init_tbls(struct rocker *rocker)
{
	hash_init(rocker->flow_tbl);
	spin_lock_init(&rocker->flow_tbl_lock);

	hash_init(rocker->group_tbl);
	spin_lock_init(&rocker->group_tbl_lock);

	hash_init(rocker->fdb_tbl);
	spin_lock_init(&rocker->fdb_tbl_lock);

	hash_init(rocker->internal_vlan_tbl);
	spin_lock_init(&rocker->internal_vlan_tbl_lock);

	hash_init(rocker->neigh_tbl);
	spin_lock_init(&rocker->neigh_tbl_lock);

	return 0;
}

static void rocker_free_tbls(struct rocker *rocker)
{
	unsigned long flags;
	struct rocker_flow_tbl_entry *flow_entry;
	struct rocker_group_tbl_entry *group_entry;
	struct rocker_fdb_tbl_entry *fdb_entry;
	struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
	struct rocker_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
	hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
	hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

	spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
	hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
}

static struct rocker_flow_tbl_entry *
rocker_flow_tbl_find(const struct rocker *rocker,
		     const struct rocker_flow_tbl_entry *match)
{
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(rocker->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}

static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_flow_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		rocker_port_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = rocker->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

	return rocker_cmd_exec(rocker_port, trans, flags,
			       rocker_cmd_flow_tbl_add, found, NULL, NULL);
}

static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_flow_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

	rocker_port_kfree(trans, match);

	if (found) {
		err = rocker_cmd_exec(rocker_port, trans, flags,
				      rocker_cmd_flow_tbl_del,
				      found, NULL, NULL);
		rocker_port_kfree(trans, found);
	}

	return err;
}

static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags,
			      struct rocker_flow_tbl_entry *entry)
{
	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
	else
		return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags,
				   u32 in_pport, u32 in_pport_mask,
				   enum rocker_of_dpa_table_id goto_tbl)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				u32 in_pport, __be16 vlan_id,
				__be16 vlan_id_mask,
				enum rocker_of_dpa_table_id goto_tbl,
				bool untagged, __be16 new_vlan_id)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;

	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans,
				    u32 in_pport, u32 in_pport_mask,
				    __be16 eth_type, const u8 *eth_dst,
				    const u8 *eth_dst_mask, __be16 vlan_id,
				    __be16 vlan_id_mask, bool copy_to_cpu,
				    int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  const u8 *eth_dst, const u8 *eth_dst_mask,
				  __be16 vlan_id, u32 tunnel_id,
				  enum rocker_of_dpa_table_id goto_tbl,
				  u32 group_id, bool copy_to_cpu)
{
	struct rocker_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = ROCKER_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
					  struct switchdev_trans *trans,
					  __be16 eth_type, __be32 dst,
					  __be32 dst_mask, u32 priority,
					  enum rocker_of_dpa_table_id goto_tbl,
					  u32 group_id, int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct rocker_flow_tbl_key,
				  ucast_routing.group_id);

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       u32 in_pport, u32 in_pport_mask,
			       const u8 *eth_src, const u8 *eth_src_mask,
			       const u8 *eth_dst, const u8 *eth_dst_mask,
			       __be16 eth_type, __be16 vlan_id,
			       __be16 vlan_id_mask, u8 ip_proto,
			       u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			       u32 group_id)
{
	u32 priority;
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	priority = ROCKER_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = ROCKER_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = ROCKER_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static struct rocker_group_tbl_entry *
rocker_group_tbl_find(const struct rocker *rocker,
		      const struct rocker_group_tbl_entry *match)
{
	struct rocker_group_tbl_entry *found;

	hash_for_each_possible(rocker->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}

static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
					struct rocker_group_tbl_entry *entry)
{
	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		rocker_port_kfree(trans, entry->group_ids);
		break;
	default:
		break;
	}
	rocker_port_kfree(trans, entry);
}

static int rocker_group_tbl_add(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		rocker_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(rocker->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(rocker_port, trans, flags,
			       rocker_cmd_group_tbl_add, found, NULL, NULL);
}

static int rocker_group_tbl_del(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	rocker_group_tbl_entry_free(trans, match);

	if (found) {
		err = rocker_cmd_exec(rocker_port, trans, flags,
				      rocker_cmd_group_tbl_del,
				      found, NULL, NULL);
		rocker_group_tbl_entry_free(trans, found);
	}

	return err;
}

static int rocker_group_tbl_do(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_group_tbl_entry *entry)
{
	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_group_tbl_del(rocker_port, trans, flags, entry);
	else
		return rocker_group_tbl_add(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_interface(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id, u32 out_pport,
				     bool pop_vlan)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans,
				   int flags, u8 group_count,
				   const u32 *group_ids, u32 group_id)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
					       group_count, sizeof(u32));
	if (!entry->group_ids) {
		rocker_port_kfree(trans, entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_flood(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 __be16 vlan_id, u8 group_count,
				 const u32 *group_ids, u32 group_id)
{
	return rocker_group_l2_fan_out(rocker_port, trans, flags,
				       group_count, group_ids,
				       group_id);
}

static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags,
				   u32 index, const u8 *src_mac, const u8 *dst_mac,
				   __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static struct rocker_neigh_tbl_entry *
rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
{
	struct rocker_neigh_tbl_entry *found;

	hash_for_each_possible(rocker->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}

static void _rocker_neigh_add(struct rocker *rocker,
			      struct switchdev_trans *trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (!switchdev_trans_ph_commit(trans))
		entry->index = rocker->neigh_tbl_next_index++;
	if (switchdev_trans_ph_prepare(trans))
		return;
	entry->ref_count++;
	hash_add(rocker->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}

static void _rocker_neigh_del(struct switchdev_trans *trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (switchdev_trans_ph_prepare(trans))
		return;
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		rocker_port_kfree(trans, entry);
	}
}

static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
				 struct switchdev_trans *trans,
				 const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else if (!switchdev_trans_ph_prepare(trans)) {
		entry->ref_count++;
	}
}

static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans,
				  int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		_rocker_neigh_add(rocker, trans, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = rocker_group_l3_unicast(rocker_port, trans, flags,
				      entry->index,
				      rocker_port->dev->dev_addr,
				      entry->eth_dst,
				      rocker_port->internal_vlan_id,
				      entry->ttl_check,
				      rocker_port->pport);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
						     eth_type, ip_addr,
						     inet_make_mask(32),
						     priority, goto_tbl,
						     group_id, flags);

		if (err)
			netdev_err(rocker_port->dev,
				   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	if (!adding)
		rocker_port_kfree(trans, entry);

	return err;
}

static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans,
				    __be32 ip_addr)
{
	struct net_device *dev = rocker_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
					     ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}

static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       __be32 ip_addr, u32 *index)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);
	if (found)
		*index = found->index;

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		_rocker_neigh_add(rocker, trans, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (!adding)
		rocker_port_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);

	return err;
}

static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					int flags, __be16 vlan_id)
{
	struct rocker_port *p;
	const struct rocker *rocker = rocker_port->rocker;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
					rocker->port_count, sizeof(u32));
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (!p)
			continue;
		if (!rocker_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
				    group_count, group_ids, group_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	rocker_port_kfree(trans, group_ids);
	return err;
}

static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      __be16 vlan_id, bool pop_vlan)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_port *p;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = rocker_port->pport;
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = rocker_group_l2_interface(rocker_port, trans, flags,
					vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}

static struct rocker_ctrl {
	const u8 *eth_dst;
	const u8 *eth_dst_mask;
	__be16 eth_type;
	bool acl;
	bool bridge;
	bool term;
	bool copy_to_cpu;
} rocker_ctrls[] = {
	[ROCKER_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[ROCKER_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[ROCKER_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};

static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = rocker_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = rocker_flow_tbl_acl(rocker_port, trans, flags,
				  in_pport, in_pport_mask,
				  eth_src, eth_src_mask,
				  ctrl->eth_dst, ctrl->eth_dst_mask,
				  ctrl->eth_type,
				  vlan_id, vlan_id_mask,
				  ip_proto, ip_proto_mask,
				  ip_tos, ip_tos_mask,
				  group_id);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					int flags,
					const struct rocker_ctrl *ctrl,
					__be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
				     ctrl->eth_dst, ctrl->eth_dst_mask,
				     vlan_id, tunnel_id,
				     goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       ctrl->eth_type, ctrl->eth_dst,
				       ctrl->eth_dst_mask, vlan_id,
				       vlan_id_mask, ctrl->copy_to_cpu,
				       flags);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}

static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
						 ctrl, vlan_id);
	if (ctrl->bridge)
		return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
						    ctrl, vlan_id);

	if (ctrl->term)
		return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
						  ctrl, vlan_id);

	return -EOPNOTSUPP;
}

static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id)
{
	int err = 0;
	int i;

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (rocker_port->ctrls[i]) {
			err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
						    &rocker_ctrls[i], vlan_id);
			if (err)
				return err;
		}
	}

	return err;
}

static int rocker_port_ctrl(struct rocker_port *rocker_port,
			    struct switchdev_trans *trans, int flags,
			    const struct rocker_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
					    ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}

static int rocker_port_vlan(struct rocker_port *rocker_port,
			    struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = rocker_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);

	if (adding && test_bit(ntohs(internal_vlan_id),
			       rocker_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding && !test_bit(ntohs(internal_vlan_id),
				      rocker_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	if (adding) {
		err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
						internal_vlan_id);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
					 internal_vlan_id, untagged);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
					   internal_vlan_id);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
				   in_pport, vlan_id, vlan_id_mask,
				   goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN table\n", err);

err_out:
	if (switchdev_trans_ph_prepare(trans))
		change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	return err;
}

static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
				      in_pport, in_pport_mask,
				      goto_tbl);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) ingress port table entry\n", err);

	return err;
}

struct rocker_fdb_learn_work {
	struct work_struct work;
	struct rocker_port *rocker_port;
	struct switchdev_trans *trans;
	int flags;
	u8 addr[ETH_ALEN];
	u16 vid;
};

static void rocker_port_fdb_learn_work(struct work_struct *work)
{
	const struct rocker_fdb_learn_work *lw =
		container_of(work, struct rocker_fdb_learn_work, work);
	bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
	bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->rocker_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->rocker_port->dev, &info.info);

	rocker_port_kfree(lw->trans, work);
}

static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 const u8 *addr, __be16 vlan_id)
{
	struct rocker_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = rocker_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	if (rocker_port_is_bridged(rocker_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
		err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
					     NULL, vlan_id, tunnel_id, goto_tbl,
					     group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);

	lw->rocker_port = rocker_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);

	if (switchdev_trans_ph_prepare(trans))
		rocker_port_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}

static struct rocker_fdb_tbl_entry *
rocker_fdb_tbl_find(const struct rocker *rocker,
		    const struct rocker_fdb_tbl_entry *match)
{
	struct rocker_fdb_tbl_entry *found;

	hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
			return found;

	return NULL;
}

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *fdb;
	struct rocker_fdb_tbl_entry *found;
	bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.rocker_port = rocker_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	found = rocker_fdb_tbl_find(rocker, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			rocker_port_kfree(trans, fdb);
			if (!switchdev_trans_ph_prepare(trans))
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_add(rocker->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		rocker_port_kfree(trans, fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= ROCKER_OP_FLAG_REFRESH;
	}

	return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
}

static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		if (!found->learned)
			continue;
		err = rocker_port_fdb_learn(rocker_port, trans, flags,
					    found->key.addr,
					    found->key.vlan_id);
		if (err)
			goto err_out;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}

static void rocker_fdb_cleanup(unsigned long data)
{
	struct rocker *rocker = (struct rocker *)data;
	struct rocker_port *rocker_port;
	struct rocker_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
		    ROCKER_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		rocker_port = entry->key.rocker_port;
		expires = entry->touched + rocker_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			rocker_port_fdb_learn(rocker_port, NULL,
					      flags, entry->key.addr,
					      entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
}

static int rocker_port_router_mac(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);

	return err;
}

static int rocker_port_fwding(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		flags |= ROCKER_OP_FLAG_REMOVE;

	out_pport = rocker_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		pop_vlan = rocker_vlan_id_is_internal(vlan_id);
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}

static int rocker_port_stp_update(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  u8 state)
{
	bool want[ROCKER_CTRL_MAX] = { 0, };
	bool prev_ctrls[ROCKER_CTRL_MAX];
	u8 uninitialized_var(prev_state);
	int err;
	int i;

	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
		prev_state = rocker_port->stp_state;
	}

	if (rocker_port->stp_state == state)
		return 0;

	rocker_port->stp_state = state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		want[ROCKER_CTRL_IPV4_MCAST] = true;
		want[ROCKER_CTRL_IPV6_MCAST] = true;
		if (rocker_port_is_bridged(rocker_port))
			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
		else if (rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_DFLT_OVS] = true;
		else
			want[ROCKER_CTRL_LOCAL_ARP] = true;
		break;
	}

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (want[i] != rocker_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
			err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
					       &rocker_ctrls[i]);
			if (err)
				goto err_out;
			rocker_port->ctrls[i] = want[i];
		}
	}

	err = rocker_port_fdb_flush(rocker_port, trans, flags);
	if (err)
		goto err_out;

	err = rocker_port_fwding(rocker_port, trans, flags);

err_out:
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		rocker_port->stp_state = prev_state;
	}

	return err;
}

static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will enable port */
		return 0;

	/* port is not bridged, so simulate going to FORWARDING state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_FORWARDING);
}

static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will disable port */
		return 0;

	/* port is not bridged, so simulate going to DISABLED state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_DISABLED);
}

static struct rocker_internal_vlan_tbl_entry *
rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
{
	struct rocker_internal_vlan_tbl_entry *found;

	hash_for_each_possible(rocker->internal_vlan_tbl, found,
			       entry, ifindex) {
		if (found->ifindex == ifindex)
			return found;
	}

	return NULL;
}

static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
					       int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *entry;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);

	for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}

static void
rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
				 int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (!found) {
		netdev_err(rocker_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, rocker->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}

static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, __be32 dst,
				int dst_len, const struct fib_info *fi,
				u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == rocker_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = rocker_port_ipv4_nh(rocker_port, trans, flags,
					  nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
					     dst_mask, priority, goto_tbl,
					     group_id, flags);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}

static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_port_fwd_enable(rocker_port, NULL, 0);
	if (err)
		goto err_fwd_enable;

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_fwd_enable:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}

static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_port_fwd_disable(rocker_port, NULL,
				ROCKER_OP_FLAG_NOWAIT);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}

static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
	struct rocker_tlv *attr;
	int rem;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_TX_FRAGS])
		return;
	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
		const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
		dma_addr_t dma_handle;
		size_t len;

		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
			continue;
		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
					attr);
		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
			continue;
		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
		pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
	}
}

static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       char *buf, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	struct rocker_tlv *frag;

	dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
		if (net_ratelimit())
			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
	if (!frag)
		goto unmap_frag;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
			       dma_handle))
		goto nest_cancel;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
			       buf_len))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, frag);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
	pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
	return -EMSGSIZE;
}

static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_tlv *frags;
	int i;
	int err;

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (unlikely(!desc_info)) {
		if (net_ratelimit())
			netdev_err(dev, "tx ring full when queue awake\n");
		return NETDEV_TX_BUSY;
	}

	rocker_desc_cookie_ptr_set(desc_info, skb);

	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;
	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		err = skb_linearize(skb);
		if (err)
			goto unmap_frags;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
						  skb_frag_address(frag),
						  skb_frag_size(frag));
		if (err)
			goto unmap_frags;
	}
	rocker_tlv_nest_end(desc_info, frags);

	rocker_desc_gen_clear(desc_info);
	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (!desc_info)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

unmap_frags:
	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frags);
out:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static int rocker_port_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int running = netif_running(dev);
	int err;

#define ROCKER_PORT_MIN_MTU	68
#define ROCKER_PORT_MAX_MTU	9000

	if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
		return -EINVAL;

	if (running)
		rocker_port_stop(dev);

	netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
	if (err)
		return err;

	if (running)
		err = rocker_port_open(dev);

	return err;
}

static int rocker_port_get_phys_port_name(struct net_device *dev,
					  char *buf, size_t len)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct port_name name = { .buf = buf, .len = len };
	int err;

	err = rocker_cmd_exec(rocker_port, NULL, 0,
			      rocker_cmd_get_port_settings_prep, NULL,
			      rocker_cmd_get_port_settings_phys_name_proc,
			      &name);

	return err ? -EOPNOTSUPP : 0;
}

static int rocker_port_change_proto_down(struct net_device *dev,
					 bool proto_down)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_port->dev->flags & IFF_UP)
		rocker_port_set_enable(rocker_port, !proto_down);
	rocker_port->dev->proto_down = proto_down;
	return 0;
}

static void rocker_port_neigh_destroy(struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(n->dev);
	int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	rocker_port_ipv4_neigh(rocker_port, NULL,
			       flags, ip_addr, n->ha);
}

static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open			= rocker_port_open,
	.ndo_stop			= rocker_port_stop,
	.ndo_start_xmit			= rocker_port_xmit,
	.ndo_set_mac_address		= rocker_port_set_mac_address,
	.ndo_change_mtu			= rocker_port_change_mtu,
	.ndo_bridge_getlink		= switchdev_port_bridge_getlink,
	.ndo_bridge_setlink		= switchdev_port_bridge_setlink,
	.ndo_bridge_dellink		= switchdev_port_bridge_dellink,
	.ndo_fdb_add			= switchdev_port_fdb_add,
	.ndo_fdb_del			= switchdev_port_fdb_del,
	.ndo_fdb_dump			= switchdev_port_fdb_dump,
	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
	.ndo_change_proto_down		= rocker_port_change_proto_down,
	.ndo_neigh_destroy		= rocker_port_neigh_destroy,
};

/********************
 * swdev interface
 ********************/

static int rocker_port_attr_get(struct net_device *dev,
				struct switchdev_attr *attr)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	const struct rocker *rocker = rocker_port->rocker;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(rocker->hw.id);
		memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags = rocker_port->brport_flags;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					unsigned long brport_flags)
{
	unsigned long orig_flags;
	int err = 0;

	orig_flags = rocker_port->brport_flags;
	rocker_port->brport_flags = brport_flags;
	if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
		err = rocker_port_set_learning(rocker_port, trans);

	if (switchdev_trans_ph_prepare(trans))
		rocker_port->brport_flags = orig_flags;

	return err;
}

static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
					  struct switchdev_trans *trans,
					  u32 ageing_time)
{
	if (!switchdev_trans_ph_prepare(trans)) {
		rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
		mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
	}

	return 0;
}
static int rocker_port_attr_set(struct net_device *dev,
				const struct switchdev_attr *attr,
				struct switchdev_trans *trans)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = rocker_port_stp_update(rocker_port, trans, 0,
					     attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = rocker_port_brport_flags_set(rocker_port, trans,
						   attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = rocker_port_bridge_ageing_time(rocker_port, trans,
						     attr->u.ageing_time);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int rocker_port_vlan_add(struct rocker_port *rocker_port,
				struct switchdev_trans *trans,
				u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = rocker_port_vlan(rocker_port, trans, 0, vid);
	if (err)
		return err;

	err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
	if (err)
		rocker_port_vlan(rocker_port, trans,
				 ROCKER_OP_FLAG_REMOVE, vid);

	return err;
}
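
/* A VLAN add in effect installs two entries: a VLAN table entry and a
 * termination (router) MAC entry so routed traffic on that VLAN is
 * accepted.  If the second install fails, the first is unwound to keep
 * hardware and software state consistent.
 */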
static int rocker_port_vlans_add(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans,
				 const struct switchdev_obj_port_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_add(rocker_port, trans,
					   vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}
static int rocker_port_fdb_add(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans,
			       const struct switchdev_obj_port_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = 0;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}
static int rocker_port_obj_add(struct net_device *dev,
			       const struct switchdev_obj *obj,
			       struct switchdev_trans *trans)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlans_add(rocker_port, trans,
					    SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
		err = rocker_port_fib_ipv4(rocker_port, trans,
					   htonl(fib4->dst), fib4->dst_len,
					   &fib4->fi, fib4->tb_id, 0);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_add(rocker_port, trans,
					  SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int rocker_port_vlan_del(struct rocker_port *rocker_port,
				u16 vid, u16 flags)
{
	int err;

	err = rocker_port_router_mac(rocker_port, NULL,
				     ROCKER_OP_FLAG_REMOVE, htons(vid));
	if (err)
		return err;

	return rocker_port_vlan(rocker_port, NULL,
				ROCKER_OP_FLAG_REMOVE, vid);
}

static int rocker_port_vlans_del(struct rocker_port *rocker_port,
				 const struct switchdev_obj_port_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}
static int rocker_port_fdb_del(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans,
			       const struct switchdev_obj_port_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = ROCKER_OP_FLAG_REMOVE;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}
static int rocker_port_obj_del(struct net_device *dev,
			       const struct switchdev_obj *obj)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlans_del(rocker_port,
					    SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
		err = rocker_port_fib_ipv4(rocker_port, NULL,
					   htonl(fib4->dst), fib4->dst_len,
					   &fib4->fi, fib4->tb_id,
					   ROCKER_OP_FLAG_REMOVE);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_del(rocker_port, NULL,
					  SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
				struct switchdev_obj_port_fdb *fdb,
				switchdev_obj_dump_cb_t *cb)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		ether_addr_copy(fdb->addr, found->key.addr);
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
						   found->key.vlan_id);
		err = cb(&fdb->obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}
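
/* The dump walks the driver's software FDB cache (kept in sync by
 * hardware learning events) rather than querying the device, so it is
 * cheap but only as fresh as the last event processed.
 */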
static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
				 struct switchdev_obj_port_vlan *vlan,
				 switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan->flags = 0;
		if (rocker_vlan_id_is_internal(htons(vid)))
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}
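
/* Internal VLANs (used for untagged traffic) are reported to the
 * bridge as PVID entries; all others come straight from the per-port
 * vlan_bitmap.
 */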
static int rocker_port_obj_dump(struct net_device *dev,
				struct switchdev_obj *obj,
				switchdev_obj_dump_cb_t *cb)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_dump(rocker_port,
					   SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlan_dump(rocker_port,
					    SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static const struct switchdev_ops rocker_port_switchdev_ops = {
	.switchdev_port_attr_get	= rocker_port_attr_get,
	.switchdev_port_attr_set	= rocker_port_attr_set,
	.switchdev_port_obj_add		= rocker_port_obj_add,
	.switchdev_port_obj_del		= rocker_port_obj_del,
	.switchdev_port_obj_dump	= rocker_port_obj_dump,
};
/********************
 * ethtool interface
 ********************/
static int rocker_port_get_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}

static int rocker_port_set_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}

static void rocker_port_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;
} rocker_port_stats[] = {
	{ "rx_packets",	ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
	{ "rx_bytes",	ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
	{ "rx_dropped",	ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors",	ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },

	{ "tx_packets",	ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
	{ "tx_bytes",	ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
	{ "tx_dropped",	ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors",	ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
};

#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
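
/* The order of entries above defines both the ethtool string order and
 * the layout of the stats array filled in by
 * rocker_cmd_get_port_stats_ethtool_proc(), which indexes data[] by the
 * same table position.  Keep the two in sync when adding counters.
 */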
static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
				    u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}
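
/* The command is a nested TLV: CMD_TYPE at the top level, with the
 * target pport wrapped inside a CMD_INFO nest, mirroring how the
 * response is parsed below.
 */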
static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	const struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv);
}

static void rocker_port_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
		int i;

		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
			data[i] = 0;
	}
}

static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ROCKER_PORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_settings		= rocker_port_get_settings,
	.set_settings		= rocker_port_set_settings,
	.get_drvinfo		= rocker_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= rocker_port_get_strings,
	.get_ethtool_stats	= rocker_port_get_stats,
	.get_sset_count		= rocker_port_get_sset_count,
};
/*****************
 * NAPI interface
 *****************/

static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}
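
/* Completed tx descriptors are handed back to the hardware by writing
 * the credit count; the queue is only woken once at least one
 * descriptor has been reclaimed, which bounds wakeup churn.
 */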
static int rocker_port_rx_proc(const struct rocker *rocker,
			       const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;
	u16 rx_flags = 0;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;
	if (attrs[ROCKER_TLV_RX_FLAGS])
		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
		skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}
static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}

static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete(napi);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}
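
/* Per NAPI convention, the poll routine returns how much of the budget
 * was consumed; napi_complete() is called only when the ring drained
 * before the budget ran out, leaving interrupts re-enabled for the
 * next burst.
 */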
/*****************
 * PCI driver ops
 *****************/

static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
	bool link_up;

	link_up = link_status & (1 << rocker_port->pport);
	if (link_up)
		netif_carrier_on(rocker_port->dev);
	else
		netif_carrier_off(rocker_port->dev);
}
static void rocker_remove_ports(const struct rocker *rocker)
{
	struct rocker_port *rocker_port;
	int i;

	for (i = 0; i < rocker->port_count; i++) {
		rocker_port = rocker->ports[i];
		if (!rocker_port)
			continue;
		rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
		unregister_netdev(rocker_port->dev);
		free_netdev(rocker_port->dev);
	}
	kfree(rocker->ports);
}

static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
						   rocker_port->dev->dev_addr);
	if (err) {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u16 untagged_vid = 0;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	dev->switchdev_ops = &rocker_port_switchdev_ops;
	netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
		       NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);

	rocker_port_set_learning(rocker_port, NULL);

	err = rocker_port_ig_tbl(rocker_port, NULL, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}

	return 0;

err_untagged_vlan:
	rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	free_netdev(dev);
	return err;
}
static int rocker_probe_ports(struct rocker *rocker)
{
	int i;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}
static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}
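
/* The device exposes one MSI-X vector for the command ring, one for
 * the event ring, and a tx/rx pair per port; ROCKER_MSIX_VEC_COUNT()
 * encodes that layout, so any other vector count is rejected as a
 * hardware mismatch.
 */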
static void rocker_msix_fini(const struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}

static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	err = rocker_init_tbls(rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot init rocker tables\n");
		goto err_init_tbls;
	}

	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
		    (unsigned long) rocker);
	mod_timer(&rocker->fdb_cleanup_timer, jiffies);

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
		 (int)sizeof(rocker->hw.id), &rocker->hw.id);

	return 0;

err_probe_ports:
	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
err_init_tbls:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}
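
/* Teardown on probe failure unwinds in exact reverse order of setup;
 * rocker_remove() below performs the same steps for a healthy device.
 */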
static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);

	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	rocker_remove_ports(rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}
static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};

/************************************
 * Net device notifier event handler
 ************************************/
static bool rocker_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	u16 untagged_vid = 0;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

	rocker_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

	return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
				    false);
	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}
static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
				   struct net_device *master)
{
	int err;

	rocker_port->bridge_dev = master;

	err = rocker_port_fwd_disable(rocker_port, NULL, 0);
	if (err)
		return err;
	err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}
static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	int err = 0;

	if (netif_is_bridge_master(master))
		err = rocker_port_bridge_join(rocker_port, master);
	else if (netif_is_ovs_master(master))
		err = rocker_port_ovs_changed(rocker_port, master);
	return err;
}

static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
{
	int err = 0;

	if (rocker_port_is_bridged(rocker_port))
		err = rocker_port_bridge_leave(rocker_port);
	else if (rocker_port_is_ovsed(rocker_port))
		err = rocker_port_ovs_changed(rocker_port, NULL);
	return err;
}
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_port_master_linked(rocker_port,
							info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
		} else {
			err = rocker_port_master_unlinked(rocker_port);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
		}
		break;
	}
out:
	return NOTIFY_DONE;
}
static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};

/************************************
 * Net event notifier event handler
 ************************************/
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
		    ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}
static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		err = rocker_neigh_update(dev, n);
		if (err)
			netdev_warn(dev,
				    "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};

/***********************
 * Module init and exit
 ***********************/
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}
static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);