1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
4 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 #include <linux/interrupt.h>
12 #include <linux/sched.h>
13 #include <linux/wait.h>
14 #include <linux/spinlock.h>
15 #include <linux/sort.h>
16 #include <linux/random.h>
17 #include <linux/netdevice.h>
18 #include <linux/skbuff.h>
19 #include <linux/socket.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/if_ether.h>
23 #include <linux/if_vlan.h>
24 #include <linux/if_bridge.h>
25 #include <linux/bitops.h>
26 #include <linux/ctype.h>
27 #include <linux/workqueue.h>
28 #include <net/switchdev.h>
29 #include <net/rtnetlink.h>
30 #include <net/netevent.h>
32 #include <net/fib_rules.h>
33 #include <net/fib_notifier.h>
34 #include <linux/io-64-nonatomic-lo-hi.h>
35 #include <generated/utsrelease.h>
37 #include "rocker_hw.h"
39 #include "rocker_tlv.h"
/* Driver name used for IRQ registration and module identification. */
static const char rocker_driver_name[] = "rocker";
43 static const struct pci_device_id rocker_pci_id_table
[] = {
44 {PCI_VDEVICE(REDHAT
, PCI_DEVICE_ID_REDHAT_ROCKER
), 0},
49 wait_queue_head_t wait
;
54 static void rocker_wait_reset(struct rocker_wait
*wait
)
60 static void rocker_wait_init(struct rocker_wait
*wait
)
62 init_waitqueue_head(&wait
->wait
);
63 rocker_wait_reset(wait
);
66 static struct rocker_wait
*rocker_wait_create(void)
68 struct rocker_wait
*wait
;
70 wait
= kzalloc(sizeof(*wait
), GFP_KERNEL
);
/* Release a wait object allocated by rocker_wait_create(). */
static void rocker_wait_destroy(struct rocker_wait *wait)
{
	kfree(wait);
}
81 static bool rocker_wait_event_timeout(struct rocker_wait
*wait
,
82 unsigned long timeout
)
84 wait_event_timeout(wait
->wait
, wait
->done
, HZ
/ 10);
90 static void rocker_wait_wake_up(struct rocker_wait
*wait
)
96 static u32
rocker_msix_vector(const struct rocker
*rocker
, unsigned int vector
)
98 return rocker
->msix_entries
[vector
].vector
;
101 static u32
rocker_msix_tx_vector(const struct rocker_port
*rocker_port
)
103 return rocker_msix_vector(rocker_port
->rocker
,
104 ROCKER_MSIX_VEC_TX(rocker_port
->port_number
));
107 static u32
rocker_msix_rx_vector(const struct rocker_port
*rocker_port
)
109 return rocker_msix_vector(rocker_port
->rocker
,
110 ROCKER_MSIX_VEC_RX(rocker_port
->port_number
));
/* MMIO accessors for the device's BAR window. 'reg' is the register name
 * suffix; the ROCKER_ prefix is pasted on by the preprocessor.
 */
#define rocker_write32(rocker, reg, val) \
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg) \
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val) \
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg) \
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
122 /*****************************
123 * HW basic testing functions
124 *****************************/
126 static int rocker_reg_test(const struct rocker
*rocker
)
128 const struct pci_dev
*pdev
= rocker
->pdev
;
134 rocker_write32(rocker
, TEST_REG
, rnd
);
135 test_reg
= rocker_read32(rocker
, TEST_REG
);
136 if (test_reg
!= rnd
* 2) {
137 dev_err(&pdev
->dev
, "unexpected 32bit register value %08llx, expected %08llx\n",
144 rnd
|= prandom_u32();
145 rocker_write64(rocker
, TEST_REG64
, rnd
);
146 test_reg
= rocker_read64(rocker
, TEST_REG64
);
147 if (test_reg
!= rnd
* 2) {
148 dev_err(&pdev
->dev
, "unexpected 64bit register value %16llx, expected %16llx\n",
156 static int rocker_dma_test_one(const struct rocker
*rocker
,
157 struct rocker_wait
*wait
, u32 test_type
,
158 dma_addr_t dma_handle
, const unsigned char *buf
,
159 const unsigned char *expect
, size_t size
)
161 const struct pci_dev
*pdev
= rocker
->pdev
;
164 rocker_wait_reset(wait
);
165 rocker_write32(rocker
, TEST_DMA_CTRL
, test_type
);
167 if (!rocker_wait_event_timeout(wait
, HZ
/ 10)) {
168 dev_err(&pdev
->dev
, "no interrupt received within a timeout\n");
172 for (i
= 0; i
< size
; i
++) {
173 if (buf
[i
] != expect
[i
]) {
174 dev_err(&pdev
->dev
, "unexpected memory content %02x at byte %x\n, %02x expected",
175 buf
[i
], i
, expect
[i
]);
/* Size of each half of the DMA self-test buffer, and the byte pattern the
 * device is asked to fill it with.
 */
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
185 static int rocker_dma_test_offset(const struct rocker
*rocker
,
186 struct rocker_wait
*wait
, int offset
)
188 struct pci_dev
*pdev
= rocker
->pdev
;
189 unsigned char *alloc
;
191 unsigned char *expect
;
192 dma_addr_t dma_handle
;
196 alloc
= kzalloc(ROCKER_TEST_DMA_BUF_SIZE
* 2 + offset
,
197 GFP_KERNEL
| GFP_DMA
);
200 buf
= alloc
+ offset
;
201 expect
= buf
+ ROCKER_TEST_DMA_BUF_SIZE
;
203 dma_handle
= dma_map_single(&pdev
->dev
, buf
, ROCKER_TEST_DMA_BUF_SIZE
,
205 if (dma_mapping_error(&pdev
->dev
, dma_handle
)) {
210 rocker_write64(rocker
, TEST_DMA_ADDR
, dma_handle
);
211 rocker_write32(rocker
, TEST_DMA_SIZE
, ROCKER_TEST_DMA_BUF_SIZE
);
213 memset(expect
, ROCKER_TEST_DMA_FILL_PATTERN
, ROCKER_TEST_DMA_BUF_SIZE
);
214 err
= rocker_dma_test_one(rocker
, wait
, ROCKER_TEST_DMA_CTRL_FILL
,
215 dma_handle
, buf
, expect
,
216 ROCKER_TEST_DMA_BUF_SIZE
);
220 memset(expect
, 0, ROCKER_TEST_DMA_BUF_SIZE
);
221 err
= rocker_dma_test_one(rocker
, wait
, ROCKER_TEST_DMA_CTRL_CLEAR
,
222 dma_handle
, buf
, expect
,
223 ROCKER_TEST_DMA_BUF_SIZE
);
227 prandom_bytes(buf
, ROCKER_TEST_DMA_BUF_SIZE
);
228 for (i
= 0; i
< ROCKER_TEST_DMA_BUF_SIZE
; i
++)
230 err
= rocker_dma_test_one(rocker
, wait
, ROCKER_TEST_DMA_CTRL_INVERT
,
231 dma_handle
, buf
, expect
,
232 ROCKER_TEST_DMA_BUF_SIZE
);
237 dma_unmap_single(&pdev
->dev
, dma_handle
, ROCKER_TEST_DMA_BUF_SIZE
,
/* Repeat the DMA self-test at 8 different byte offsets to cover all
 * sub-word alignments. Stops at the first failure.
 */
static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}
259 static irqreturn_t
rocker_test_irq_handler(int irq
, void *dev_id
)
261 struct rocker_wait
*wait
= dev_id
;
263 rocker_wait_wake_up(wait
);
268 static int rocker_basic_hw_test(const struct rocker
*rocker
)
270 const struct pci_dev
*pdev
= rocker
->pdev
;
271 struct rocker_wait wait
;
274 err
= rocker_reg_test(rocker
);
276 dev_err(&pdev
->dev
, "reg test failed\n");
280 err
= request_irq(rocker_msix_vector(rocker
, ROCKER_MSIX_VEC_TEST
),
281 rocker_test_irq_handler
, 0,
282 rocker_driver_name
, &wait
);
284 dev_err(&pdev
->dev
, "cannot assign test irq\n");
288 rocker_wait_init(&wait
);
289 rocker_write32(rocker
, TEST_IRQ
, ROCKER_MSIX_VEC_TEST
);
291 if (!rocker_wait_event_timeout(&wait
, HZ
/ 10)) {
292 dev_err(&pdev
->dev
, "no interrupt received within a timeout\n");
297 err
= rocker_dma_test(rocker
, &wait
);
299 dev_err(&pdev
->dev
, "dma test failed\n");
302 free_irq(rocker_msix_vector(rocker
, ROCKER_MSIX_VEC_TEST
), &wait
);
306 /******************************************
307 * DMA rings and descriptors manipulations
308 ******************************************/
310 static u32
__pos_inc(u32 pos
, size_t limit
)
312 return ++pos
== limit
? 0 : pos
;
315 static int rocker_desc_err(const struct rocker_desc_info
*desc_info
)
317 int err
= desc_info
->desc
->comp_err
& ~ROCKER_DMA_DESC_COMP_ERR_GEN
;
332 case -ROCKER_EMSGSIZE
:
334 case -ROCKER_ENOTSUP
:
336 case -ROCKER_ENOBUFS
:
343 static void rocker_desc_gen_clear(const struct rocker_desc_info
*desc_info
)
345 desc_info
->desc
->comp_err
&= ~ROCKER_DMA_DESC_COMP_ERR_GEN
;
348 static bool rocker_desc_gen(const struct rocker_desc_info
*desc_info
)
350 u32 comp_err
= desc_info
->desc
->comp_err
;
352 return comp_err
& ROCKER_DMA_DESC_COMP_ERR_GEN
? true : false;
356 rocker_desc_cookie_ptr_get(const struct rocker_desc_info
*desc_info
)
358 return (void *)(uintptr_t)desc_info
->desc
->cookie
;
361 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info
*desc_info
,
364 desc_info
->desc
->cookie
= (uintptr_t) ptr
;
367 static struct rocker_desc_info
*
368 rocker_desc_head_get(const struct rocker_dma_ring_info
*info
)
370 struct rocker_desc_info
*desc_info
;
371 u32 head
= __pos_inc(info
->head
, info
->size
);
373 desc_info
= &info
->desc_info
[info
->head
];
374 if (head
== info
->tail
)
375 return NULL
; /* ring full */
376 desc_info
->tlv_size
= 0;
380 static void rocker_desc_commit(const struct rocker_desc_info
*desc_info
)
382 desc_info
->desc
->buf_size
= desc_info
->data_size
;
383 desc_info
->desc
->tlv_size
= desc_info
->tlv_size
;
386 static void rocker_desc_head_set(const struct rocker
*rocker
,
387 struct rocker_dma_ring_info
*info
,
388 const struct rocker_desc_info
*desc_info
)
390 u32 head
= __pos_inc(info
->head
, info
->size
);
392 BUG_ON(head
== info
->tail
);
393 rocker_desc_commit(desc_info
);
395 rocker_write32(rocker
, DMA_DESC_HEAD(info
->type
), head
);
398 static struct rocker_desc_info
*
399 rocker_desc_tail_get(struct rocker_dma_ring_info
*info
)
401 struct rocker_desc_info
*desc_info
;
403 if (info
->tail
== info
->head
)
404 return NULL
; /* nothing to be done between head and tail */
405 desc_info
= &info
->desc_info
[info
->tail
];
406 if (!rocker_desc_gen(desc_info
))
407 return NULL
; /* gen bit not set, desc is not ready yet */
408 info
->tail
= __pos_inc(info
->tail
, info
->size
);
409 desc_info
->tlv_size
= desc_info
->desc
->tlv_size
;
413 static void rocker_dma_ring_credits_set(const struct rocker
*rocker
,
414 const struct rocker_dma_ring_info
*info
,
418 rocker_write32(rocker
, DMA_DESC_CREDITS(info
->type
), credits
);
421 static unsigned long rocker_dma_ring_size_fix(size_t size
)
423 return max(ROCKER_DMA_SIZE_MIN
,
424 min(roundup_pow_of_two(size
), ROCKER_DMA_SIZE_MAX
));
427 static int rocker_dma_ring_create(const struct rocker
*rocker
,
430 struct rocker_dma_ring_info
*info
)
434 BUG_ON(size
!= rocker_dma_ring_size_fix(size
));
439 info
->desc_info
= kcalloc(info
->size
, sizeof(*info
->desc_info
),
441 if (!info
->desc_info
)
444 info
->desc
= dma_alloc_coherent(&rocker
->pdev
->dev
,
445 info
->size
* sizeof(*info
->desc
),
446 &info
->mapaddr
, GFP_KERNEL
);
448 kfree(info
->desc_info
);
452 for (i
= 0; i
< info
->size
; i
++)
453 info
->desc_info
[i
].desc
= &info
->desc
[i
];
455 rocker_write32(rocker
, DMA_DESC_CTRL(info
->type
),
456 ROCKER_DMA_DESC_CTRL_RESET
);
457 rocker_write64(rocker
, DMA_DESC_ADDR(info
->type
), info
->mapaddr
);
458 rocker_write32(rocker
, DMA_DESC_SIZE(info
->type
), info
->size
);
463 static void rocker_dma_ring_destroy(const struct rocker
*rocker
,
464 const struct rocker_dma_ring_info
*info
)
466 rocker_write64(rocker
, DMA_DESC_ADDR(info
->type
), 0);
468 dma_free_coherent(&rocker
->pdev
->dev
,
469 info
->size
* sizeof(struct rocker_desc
), info
->desc
,
471 kfree(info
->desc_info
);
474 static void rocker_dma_ring_pass_to_producer(const struct rocker
*rocker
,
475 struct rocker_dma_ring_info
*info
)
479 BUG_ON(info
->head
|| info
->tail
);
481 /* When ring is consumer, we need to advance head for each desc.
482 * That tells hw that the desc is ready to be used by it.
484 for (i
= 0; i
< info
->size
- 1; i
++)
485 rocker_desc_head_set(rocker
, info
, &info
->desc_info
[i
]);
486 rocker_desc_commit(&info
->desc_info
[i
]);
489 static int rocker_dma_ring_bufs_alloc(const struct rocker
*rocker
,
490 const struct rocker_dma_ring_info
*info
,
491 int direction
, size_t buf_size
)
493 struct pci_dev
*pdev
= rocker
->pdev
;
497 for (i
= 0; i
< info
->size
; i
++) {
498 struct rocker_desc_info
*desc_info
= &info
->desc_info
[i
];
499 struct rocker_desc
*desc
= &info
->desc
[i
];
500 dma_addr_t dma_handle
;
503 buf
= kzalloc(buf_size
, GFP_KERNEL
| GFP_DMA
);
509 dma_handle
= dma_map_single(&pdev
->dev
, buf
, buf_size
,
511 if (dma_mapping_error(&pdev
->dev
, dma_handle
)) {
517 desc_info
->data
= buf
;
518 desc_info
->data_size
= buf_size
;
519 dma_unmap_addr_set(desc_info
, mapaddr
, dma_handle
);
521 desc
->buf_addr
= dma_handle
;
522 desc
->buf_size
= buf_size
;
527 for (i
--; i
>= 0; i
--) {
528 const struct rocker_desc_info
*desc_info
= &info
->desc_info
[i
];
530 dma_unmap_single(&pdev
->dev
,
531 dma_unmap_addr(desc_info
, mapaddr
),
532 desc_info
->data_size
, direction
);
533 kfree(desc_info
->data
);
538 static void rocker_dma_ring_bufs_free(const struct rocker
*rocker
,
539 const struct rocker_dma_ring_info
*info
,
542 struct pci_dev
*pdev
= rocker
->pdev
;
545 for (i
= 0; i
< info
->size
; i
++) {
546 const struct rocker_desc_info
*desc_info
= &info
->desc_info
[i
];
547 struct rocker_desc
*desc
= &info
->desc
[i
];
551 dma_unmap_single(&pdev
->dev
,
552 dma_unmap_addr(desc_info
, mapaddr
),
553 desc_info
->data_size
, direction
);
554 kfree(desc_info
->data
);
558 static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info
*desc_info
)
560 struct rocker_wait
*wait
;
562 wait
= rocker_wait_create();
565 rocker_desc_cookie_ptr_set(desc_info
, wait
);
/* Free the wait object attached to a command descriptor's cookie. */
static void
rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info);

	rocker_wait_destroy(wait);
}
577 static int rocker_dma_cmd_ring_waits_alloc(const struct rocker
*rocker
)
579 const struct rocker_dma_ring_info
*cmd_ring
= &rocker
->cmd_ring
;
583 for (i
= 0; i
< cmd_ring
->size
; i
++) {
584 err
= rocker_dma_cmd_ring_wait_alloc(&cmd_ring
->desc_info
[i
]);
591 for (i
--; i
>= 0; i
--)
592 rocker_dma_cmd_ring_wait_free(&cmd_ring
->desc_info
[i
]);
596 static void rocker_dma_cmd_ring_waits_free(const struct rocker
*rocker
)
598 const struct rocker_dma_ring_info
*cmd_ring
= &rocker
->cmd_ring
;
601 for (i
= 0; i
< cmd_ring
->size
; i
++)
602 rocker_dma_cmd_ring_wait_free(&cmd_ring
->desc_info
[i
]);
605 static int rocker_dma_rings_init(struct rocker
*rocker
)
607 const struct pci_dev
*pdev
= rocker
->pdev
;
610 err
= rocker_dma_ring_create(rocker
, ROCKER_DMA_CMD
,
611 ROCKER_DMA_CMD_DEFAULT_SIZE
,
614 dev_err(&pdev
->dev
, "failed to create command dma ring\n");
618 spin_lock_init(&rocker
->cmd_ring_lock
);
620 err
= rocker_dma_ring_bufs_alloc(rocker
, &rocker
->cmd_ring
,
621 DMA_BIDIRECTIONAL
, PAGE_SIZE
);
623 dev_err(&pdev
->dev
, "failed to alloc command dma ring buffers\n");
624 goto err_dma_cmd_ring_bufs_alloc
;
627 err
= rocker_dma_cmd_ring_waits_alloc(rocker
);
629 dev_err(&pdev
->dev
, "failed to alloc command dma ring waits\n");
630 goto err_dma_cmd_ring_waits_alloc
;
633 err
= rocker_dma_ring_create(rocker
, ROCKER_DMA_EVENT
,
634 ROCKER_DMA_EVENT_DEFAULT_SIZE
,
635 &rocker
->event_ring
);
637 dev_err(&pdev
->dev
, "failed to create event dma ring\n");
638 goto err_dma_event_ring_create
;
641 err
= rocker_dma_ring_bufs_alloc(rocker
, &rocker
->event_ring
,
642 DMA_FROM_DEVICE
, PAGE_SIZE
);
644 dev_err(&pdev
->dev
, "failed to alloc event dma ring buffers\n");
645 goto err_dma_event_ring_bufs_alloc
;
647 rocker_dma_ring_pass_to_producer(rocker
, &rocker
->event_ring
);
650 err_dma_event_ring_bufs_alloc
:
651 rocker_dma_ring_destroy(rocker
, &rocker
->event_ring
);
652 err_dma_event_ring_create
:
653 rocker_dma_cmd_ring_waits_free(rocker
);
654 err_dma_cmd_ring_waits_alloc
:
655 rocker_dma_ring_bufs_free(rocker
, &rocker
->cmd_ring
,
657 err_dma_cmd_ring_bufs_alloc
:
658 rocker_dma_ring_destroy(rocker
, &rocker
->cmd_ring
);
662 static void rocker_dma_rings_fini(struct rocker
*rocker
)
664 rocker_dma_ring_bufs_free(rocker
, &rocker
->event_ring
,
666 rocker_dma_ring_destroy(rocker
, &rocker
->event_ring
);
667 rocker_dma_cmd_ring_waits_free(rocker
);
668 rocker_dma_ring_bufs_free(rocker
, &rocker
->cmd_ring
,
670 rocker_dma_ring_destroy(rocker
, &rocker
->cmd_ring
);
673 static int rocker_dma_rx_ring_skb_map(const struct rocker_port
*rocker_port
,
674 struct rocker_desc_info
*desc_info
,
675 struct sk_buff
*skb
, size_t buf_len
)
677 const struct rocker
*rocker
= rocker_port
->rocker
;
678 struct pci_dev
*pdev
= rocker
->pdev
;
679 dma_addr_t dma_handle
;
681 dma_handle
= dma_map_single(&pdev
->dev
, skb
->data
, buf_len
,
683 if (dma_mapping_error(&pdev
->dev
, dma_handle
))
685 if (rocker_tlv_put_u64(desc_info
, ROCKER_TLV_RX_FRAG_ADDR
, dma_handle
))
686 goto tlv_put_failure
;
687 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_RX_FRAG_MAX_LEN
, buf_len
))
688 goto tlv_put_failure
;
692 dma_unmap_single(&pdev
->dev
, dma_handle
, buf_len
, DMA_FROM_DEVICE
);
693 desc_info
->tlv_size
= 0;
697 static size_t rocker_port_rx_buf_len(const struct rocker_port
*rocker_port
)
699 return rocker_port
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
;
702 static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port
*rocker_port
,
703 struct rocker_desc_info
*desc_info
)
705 struct net_device
*dev
= rocker_port
->dev
;
707 size_t buf_len
= rocker_port_rx_buf_len(rocker_port
);
710 /* Ensure that hw will see tlv_size zero in case of an error.
711 * That tells hw to use another descriptor.
713 rocker_desc_cookie_ptr_set(desc_info
, NULL
);
714 desc_info
->tlv_size
= 0;
716 skb
= netdev_alloc_skb_ip_align(dev
, buf_len
);
719 err
= rocker_dma_rx_ring_skb_map(rocker_port
, desc_info
, skb
, buf_len
);
721 dev_kfree_skb_any(skb
);
724 rocker_desc_cookie_ptr_set(desc_info
, skb
);
728 static void rocker_dma_rx_ring_skb_unmap(const struct rocker
*rocker
,
729 const struct rocker_tlv
**attrs
)
731 struct pci_dev
*pdev
= rocker
->pdev
;
732 dma_addr_t dma_handle
;
735 if (!attrs
[ROCKER_TLV_RX_FRAG_ADDR
] ||
736 !attrs
[ROCKER_TLV_RX_FRAG_MAX_LEN
])
738 dma_handle
= rocker_tlv_get_u64(attrs
[ROCKER_TLV_RX_FRAG_ADDR
]);
739 len
= rocker_tlv_get_u16(attrs
[ROCKER_TLV_RX_FRAG_MAX_LEN
]);
740 dma_unmap_single(&pdev
->dev
, dma_handle
, len
, DMA_FROM_DEVICE
);
743 static void rocker_dma_rx_ring_skb_free(const struct rocker
*rocker
,
744 const struct rocker_desc_info
*desc_info
)
746 const struct rocker_tlv
*attrs
[ROCKER_TLV_RX_MAX
+ 1];
747 struct sk_buff
*skb
= rocker_desc_cookie_ptr_get(desc_info
);
751 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_RX_MAX
, desc_info
);
752 rocker_dma_rx_ring_skb_unmap(rocker
, attrs
);
753 dev_kfree_skb_any(skb
);
756 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port
*rocker_port
)
758 const struct rocker_dma_ring_info
*rx_ring
= &rocker_port
->rx_ring
;
759 const struct rocker
*rocker
= rocker_port
->rocker
;
763 for (i
= 0; i
< rx_ring
->size
; i
++) {
764 err
= rocker_dma_rx_ring_skb_alloc(rocker_port
,
765 &rx_ring
->desc_info
[i
]);
772 for (i
--; i
>= 0; i
--)
773 rocker_dma_rx_ring_skb_free(rocker
, &rx_ring
->desc_info
[i
]);
777 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port
*rocker_port
)
779 const struct rocker_dma_ring_info
*rx_ring
= &rocker_port
->rx_ring
;
780 const struct rocker
*rocker
= rocker_port
->rocker
;
783 for (i
= 0; i
< rx_ring
->size
; i
++)
784 rocker_dma_rx_ring_skb_free(rocker
, &rx_ring
->desc_info
[i
]);
787 static int rocker_port_dma_rings_init(struct rocker_port
*rocker_port
)
789 struct rocker
*rocker
= rocker_port
->rocker
;
792 err
= rocker_dma_ring_create(rocker
,
793 ROCKER_DMA_TX(rocker_port
->port_number
),
794 ROCKER_DMA_TX_DEFAULT_SIZE
,
795 &rocker_port
->tx_ring
);
797 netdev_err(rocker_port
->dev
, "failed to create tx dma ring\n");
801 err
= rocker_dma_ring_bufs_alloc(rocker
, &rocker_port
->tx_ring
,
803 ROCKER_DMA_TX_DESC_SIZE
);
805 netdev_err(rocker_port
->dev
, "failed to alloc tx dma ring buffers\n");
806 goto err_dma_tx_ring_bufs_alloc
;
809 err
= rocker_dma_ring_create(rocker
,
810 ROCKER_DMA_RX(rocker_port
->port_number
),
811 ROCKER_DMA_RX_DEFAULT_SIZE
,
812 &rocker_port
->rx_ring
);
814 netdev_err(rocker_port
->dev
, "failed to create rx dma ring\n");
815 goto err_dma_rx_ring_create
;
818 err
= rocker_dma_ring_bufs_alloc(rocker
, &rocker_port
->rx_ring
,
820 ROCKER_DMA_RX_DESC_SIZE
);
822 netdev_err(rocker_port
->dev
, "failed to alloc rx dma ring buffers\n");
823 goto err_dma_rx_ring_bufs_alloc
;
826 err
= rocker_dma_rx_ring_skbs_alloc(rocker_port
);
828 netdev_err(rocker_port
->dev
, "failed to alloc rx dma ring skbs\n");
829 goto err_dma_rx_ring_skbs_alloc
;
831 rocker_dma_ring_pass_to_producer(rocker
, &rocker_port
->rx_ring
);
835 err_dma_rx_ring_skbs_alloc
:
836 rocker_dma_ring_bufs_free(rocker
, &rocker_port
->rx_ring
,
838 err_dma_rx_ring_bufs_alloc
:
839 rocker_dma_ring_destroy(rocker
, &rocker_port
->rx_ring
);
840 err_dma_rx_ring_create
:
841 rocker_dma_ring_bufs_free(rocker
, &rocker_port
->tx_ring
,
843 err_dma_tx_ring_bufs_alloc
:
844 rocker_dma_ring_destroy(rocker
, &rocker_port
->tx_ring
);
848 static void rocker_port_dma_rings_fini(struct rocker_port
*rocker_port
)
850 struct rocker
*rocker
= rocker_port
->rocker
;
852 rocker_dma_rx_ring_skbs_free(rocker_port
);
853 rocker_dma_ring_bufs_free(rocker
, &rocker_port
->rx_ring
,
855 rocker_dma_ring_destroy(rocker
, &rocker_port
->rx_ring
);
856 rocker_dma_ring_bufs_free(rocker
, &rocker_port
->tx_ring
,
858 rocker_dma_ring_destroy(rocker
, &rocker_port
->tx_ring
);
861 static void rocker_port_set_enable(const struct rocker_port
*rocker_port
,
864 u64 val
= rocker_read64(rocker_port
->rocker
, PORT_PHYS_ENABLE
);
867 val
|= 1ULL << rocker_port
->pport
;
869 val
&= ~(1ULL << rocker_port
->pport
);
870 rocker_write64(rocker_port
->rocker
, PORT_PHYS_ENABLE
, val
);
873 /********************************
874 * Interrupt handler and helpers
875 ********************************/
877 static irqreturn_t
rocker_cmd_irq_handler(int irq
, void *dev_id
)
879 struct rocker
*rocker
= dev_id
;
880 const struct rocker_desc_info
*desc_info
;
881 struct rocker_wait
*wait
;
884 spin_lock(&rocker
->cmd_ring_lock
);
885 while ((desc_info
= rocker_desc_tail_get(&rocker
->cmd_ring
))) {
886 wait
= rocker_desc_cookie_ptr_get(desc_info
);
888 rocker_desc_gen_clear(desc_info
);
890 rocker_wait_wake_up(wait
);
894 spin_unlock(&rocker
->cmd_ring_lock
);
895 rocker_dma_ring_credits_set(rocker
, &rocker
->cmd_ring
, credits
);
900 static void rocker_port_link_up(const struct rocker_port
*rocker_port
)
902 netif_carrier_on(rocker_port
->dev
);
903 netdev_info(rocker_port
->dev
, "Link is up\n");
906 static void rocker_port_link_down(const struct rocker_port
*rocker_port
)
908 netif_carrier_off(rocker_port
->dev
);
909 netdev_info(rocker_port
->dev
, "Link is down\n");
912 static int rocker_event_link_change(const struct rocker
*rocker
,
913 const struct rocker_tlv
*info
)
915 const struct rocker_tlv
*attrs
[ROCKER_TLV_EVENT_LINK_CHANGED_MAX
+ 1];
916 unsigned int port_number
;
918 struct rocker_port
*rocker_port
;
920 rocker_tlv_parse_nested(attrs
, ROCKER_TLV_EVENT_LINK_CHANGED_MAX
, info
);
921 if (!attrs
[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT
] ||
922 !attrs
[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP
])
925 rocker_tlv_get_u32(attrs
[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT
]) - 1;
926 link_up
= rocker_tlv_get_u8(attrs
[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP
]);
928 if (port_number
>= rocker
->port_count
)
931 rocker_port
= rocker
->ports
[port_number
];
932 if (netif_carrier_ok(rocker_port
->dev
) != link_up
) {
934 rocker_port_link_up(rocker_port
);
936 rocker_port_link_down(rocker_port
);
942 static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port
*rocker_port
,
943 const unsigned char *addr
,
946 static int rocker_event_mac_vlan_seen(const struct rocker
*rocker
,
947 const struct rocker_tlv
*info
)
949 const struct rocker_tlv
*attrs
[ROCKER_TLV_EVENT_MAC_VLAN_MAX
+ 1];
950 unsigned int port_number
;
951 struct rocker_port
*rocker_port
;
952 const unsigned char *addr
;
955 rocker_tlv_parse_nested(attrs
, ROCKER_TLV_EVENT_MAC_VLAN_MAX
, info
);
956 if (!attrs
[ROCKER_TLV_EVENT_MAC_VLAN_PPORT
] ||
957 !attrs
[ROCKER_TLV_EVENT_MAC_VLAN_MAC
] ||
958 !attrs
[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID
])
961 rocker_tlv_get_u32(attrs
[ROCKER_TLV_EVENT_MAC_VLAN_PPORT
]) - 1;
962 addr
= rocker_tlv_data(attrs
[ROCKER_TLV_EVENT_MAC_VLAN_MAC
]);
963 vlan_id
= rocker_tlv_get_be16(attrs
[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID
]);
965 if (port_number
>= rocker
->port_count
)
968 rocker_port
= rocker
->ports
[port_number
];
969 return rocker_world_port_ev_mac_vlan_seen(rocker_port
, addr
, vlan_id
);
972 static int rocker_event_process(const struct rocker
*rocker
,
973 const struct rocker_desc_info
*desc_info
)
975 const struct rocker_tlv
*attrs
[ROCKER_TLV_EVENT_MAX
+ 1];
976 const struct rocker_tlv
*info
;
979 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_EVENT_MAX
, desc_info
);
980 if (!attrs
[ROCKER_TLV_EVENT_TYPE
] ||
981 !attrs
[ROCKER_TLV_EVENT_INFO
])
984 type
= rocker_tlv_get_u16(attrs
[ROCKER_TLV_EVENT_TYPE
]);
985 info
= attrs
[ROCKER_TLV_EVENT_INFO
];
988 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED
:
989 return rocker_event_link_change(rocker
, info
);
990 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN
:
991 return rocker_event_mac_vlan_seen(rocker
, info
);
997 static irqreturn_t
rocker_event_irq_handler(int irq
, void *dev_id
)
999 struct rocker
*rocker
= dev_id
;
1000 const struct pci_dev
*pdev
= rocker
->pdev
;
1001 const struct rocker_desc_info
*desc_info
;
1005 while ((desc_info
= rocker_desc_tail_get(&rocker
->event_ring
))) {
1006 err
= rocker_desc_err(desc_info
);
1008 dev_err(&pdev
->dev
, "event desc received with err %d\n",
1011 err
= rocker_event_process(rocker
, desc_info
);
1013 dev_err(&pdev
->dev
, "event processing failed with err %d\n",
1016 rocker_desc_gen_clear(desc_info
);
1017 rocker_desc_head_set(rocker
, &rocker
->event_ring
, desc_info
);
1020 rocker_dma_ring_credits_set(rocker
, &rocker
->event_ring
, credits
);
1025 static irqreturn_t
rocker_tx_irq_handler(int irq
, void *dev_id
)
1027 struct rocker_port
*rocker_port
= dev_id
;
1029 napi_schedule(&rocker_port
->napi_tx
);
1033 static irqreturn_t
rocker_rx_irq_handler(int irq
, void *dev_id
)
1035 struct rocker_port
*rocker_port
= dev_id
;
1037 napi_schedule(&rocker_port
->napi_rx
);
1041 /********************
1043 ********************/
1045 int rocker_cmd_exec(struct rocker_port
*rocker_port
, bool nowait
,
1046 rocker_cmd_prep_cb_t prepare
, void *prepare_priv
,
1047 rocker_cmd_proc_cb_t process
, void *process_priv
)
1049 struct rocker
*rocker
= rocker_port
->rocker
;
1050 struct rocker_desc_info
*desc_info
;
1051 struct rocker_wait
*wait
;
1052 unsigned long lock_flags
;
1055 spin_lock_irqsave(&rocker
->cmd_ring_lock
, lock_flags
);
1057 desc_info
= rocker_desc_head_get(&rocker
->cmd_ring
);
1059 spin_unlock_irqrestore(&rocker
->cmd_ring_lock
, lock_flags
);
1063 wait
= rocker_desc_cookie_ptr_get(desc_info
);
1064 rocker_wait_init(wait
);
1065 wait
->nowait
= nowait
;
1067 err
= prepare(rocker_port
, desc_info
, prepare_priv
);
1069 spin_unlock_irqrestore(&rocker
->cmd_ring_lock
, lock_flags
);
1073 rocker_desc_head_set(rocker
, &rocker
->cmd_ring
, desc_info
);
1075 spin_unlock_irqrestore(&rocker
->cmd_ring_lock
, lock_flags
);
1080 if (!rocker_wait_event_timeout(wait
, HZ
/ 10))
1083 err
= rocker_desc_err(desc_info
);
1088 err
= process(rocker_port
, desc_info
, process_priv
);
1090 rocker_desc_gen_clear(desc_info
);
1095 rocker_cmd_get_port_settings_prep(const struct rocker_port
*rocker_port
,
1096 struct rocker_desc_info
*desc_info
,
1099 struct rocker_tlv
*cmd_info
;
1101 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
1102 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS
))
1104 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
1107 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
,
1108 rocker_port
->pport
))
1110 rocker_tlv_nest_end(desc_info
, cmd_info
);
1115 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port
*rocker_port
,
1116 const struct rocker_desc_info
*desc_info
,
1119 struct ethtool_link_ksettings
*ecmd
= priv
;
1120 const struct rocker_tlv
*attrs
[ROCKER_TLV_CMD_MAX
+ 1];
1121 const struct rocker_tlv
*info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_MAX
+ 1];
1126 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_CMD_MAX
, desc_info
);
1127 if (!attrs
[ROCKER_TLV_CMD_INFO
])
1130 rocker_tlv_parse_nested(info_attrs
, ROCKER_TLV_CMD_PORT_SETTINGS_MAX
,
1131 attrs
[ROCKER_TLV_CMD_INFO
]);
1132 if (!info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED
] ||
1133 !info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX
] ||
1134 !info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG
])
1137 speed
= rocker_tlv_get_u32(info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED
]);
1138 duplex
= rocker_tlv_get_u8(info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX
]);
1139 autoneg
= rocker_tlv_get_u8(info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG
]);
1141 ethtool_link_ksettings_zero_link_mode(ecmd
, supported
);
1142 ethtool_link_ksettings_add_link_mode(ecmd
, supported
, TP
);
1144 ecmd
->base
.phy_address
= 0xff;
1145 ecmd
->base
.port
= PORT_TP
;
1146 ecmd
->base
.speed
= speed
;
1147 ecmd
->base
.duplex
= duplex
? DUPLEX_FULL
: DUPLEX_HALF
;
1148 ecmd
->base
.autoneg
= autoneg
? AUTONEG_ENABLE
: AUTONEG_DISABLE
;
1154 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port
*rocker_port
,
1155 const struct rocker_desc_info
*desc_info
,
1158 unsigned char *macaddr
= priv
;
1159 const struct rocker_tlv
*attrs
[ROCKER_TLV_CMD_MAX
+ 1];
1160 const struct rocker_tlv
*info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_MAX
+ 1];
1161 const struct rocker_tlv
*attr
;
1163 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_CMD_MAX
, desc_info
);
1164 if (!attrs
[ROCKER_TLV_CMD_INFO
])
1167 rocker_tlv_parse_nested(info_attrs
, ROCKER_TLV_CMD_PORT_SETTINGS_MAX
,
1168 attrs
[ROCKER_TLV_CMD_INFO
]);
1169 attr
= info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR
];
1173 if (rocker_tlv_len(attr
) != ETH_ALEN
)
1176 ether_addr_copy(macaddr
, rocker_tlv_data(attr
));
1181 rocker_cmd_get_port_settings_mode_proc(const struct rocker_port
*rocker_port
,
1182 const struct rocker_desc_info
*desc_info
,
1186 const struct rocker_tlv
*attrs
[ROCKER_TLV_CMD_MAX
+ 1];
1187 const struct rocker_tlv
*info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_MAX
+ 1];
1188 const struct rocker_tlv
*attr
;
1190 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_CMD_MAX
, desc_info
);
1191 if (!attrs
[ROCKER_TLV_CMD_INFO
])
1194 rocker_tlv_parse_nested(info_attrs
, ROCKER_TLV_CMD_PORT_SETTINGS_MAX
,
1195 attrs
[ROCKER_TLV_CMD_INFO
]);
1196 attr
= info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_MODE
];
1200 *p_mode
= rocker_tlv_get_u8(info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_MODE
]);
1210 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port
*rocker_port
,
1211 const struct rocker_desc_info
*desc_info
,
1214 const struct rocker_tlv
*info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_MAX
+ 1];
1215 const struct rocker_tlv
*attrs
[ROCKER_TLV_CMD_MAX
+ 1];
1216 struct port_name
*name
= priv
;
1217 const struct rocker_tlv
*attr
;
1221 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_CMD_MAX
, desc_info
);
1222 if (!attrs
[ROCKER_TLV_CMD_INFO
])
1225 rocker_tlv_parse_nested(info_attrs
, ROCKER_TLV_CMD_PORT_SETTINGS_MAX
,
1226 attrs
[ROCKER_TLV_CMD_INFO
]);
1227 attr
= info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME
];
1231 len
= min_t(size_t, rocker_tlv_len(attr
), name
->len
);
1232 str
= rocker_tlv_data(attr
);
1234 /* make sure name only contains alphanumeric characters */
1235 for (i
= j
= 0; i
< len
; ++i
) {
1236 if (isalnum(str
[i
])) {
1237 name
->buf
[j
] = str
[i
];
1245 name
->buf
[j
] = '\0';
1251 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port
*rocker_port
,
1252 struct rocker_desc_info
*desc_info
,
1255 struct ethtool_link_ksettings
*ecmd
= priv
;
1256 struct rocker_tlv
*cmd_info
;
1258 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
1259 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS
))
1261 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
1264 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
,
1265 rocker_port
->pport
))
1267 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED
,
1270 if (rocker_tlv_put_u8(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX
,
1273 if (rocker_tlv_put_u8(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG
,
1274 ecmd
->base
.autoneg
))
1276 rocker_tlv_nest_end(desc_info
, cmd_info
);
1281 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port
*rocker_port
,
1282 struct rocker_desc_info
*desc_info
,
1285 const unsigned char *macaddr
= priv
;
1286 struct rocker_tlv
*cmd_info
;
1288 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
1289 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS
))
1291 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
1294 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
,
1295 rocker_port
->pport
))
1297 if (rocker_tlv_put(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR
,
1300 rocker_tlv_nest_end(desc_info
, cmd_info
);
1305 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port
*rocker_port
,
1306 struct rocker_desc_info
*desc_info
,
1309 int mtu
= *(int *)priv
;
1310 struct rocker_tlv
*cmd_info
;
1312 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
1313 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS
))
1315 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
1318 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
,
1319 rocker_port
->pport
))
1321 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_MTU
,
1324 rocker_tlv_nest_end(desc_info
, cmd_info
);
1329 rocker_cmd_set_port_learning_prep(const struct rocker_port
*rocker_port
,
1330 struct rocker_desc_info
*desc_info
,
1333 bool learning
= *(bool *)priv
;
1334 struct rocker_tlv
*cmd_info
;
1336 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
1337 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS
))
1339 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
1342 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
,
1343 rocker_port
->pport
))
1345 if (rocker_tlv_put_u8(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING
,
1348 rocker_tlv_nest_end(desc_info
, cmd_info
);
1353 rocker_cmd_get_port_settings_ethtool(struct rocker_port
*rocker_port
,
1354 struct ethtool_link_ksettings
*ecmd
)
1356 return rocker_cmd_exec(rocker_port
, false,
1357 rocker_cmd_get_port_settings_prep
, NULL
,
1358 rocker_cmd_get_port_settings_ethtool_proc
,
1362 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port
*rocker_port
,
1363 unsigned char *macaddr
)
1365 return rocker_cmd_exec(rocker_port
, false,
1366 rocker_cmd_get_port_settings_prep
, NULL
,
1367 rocker_cmd_get_port_settings_macaddr_proc
,
1371 static int rocker_cmd_get_port_settings_mode(struct rocker_port
*rocker_port
,
1374 return rocker_cmd_exec(rocker_port
, false,
1375 rocker_cmd_get_port_settings_prep
, NULL
,
1376 rocker_cmd_get_port_settings_mode_proc
, p_mode
);
1380 rocker_cmd_set_port_settings_ethtool(struct rocker_port
*rocker_port
,
1381 const struct ethtool_link_ksettings
*ecmd
)
1383 struct ethtool_link_ksettings copy_ecmd
;
1385 memcpy(©_ecmd
, ecmd
, sizeof(copy_ecmd
));
1387 return rocker_cmd_exec(rocker_port
, false,
1388 rocker_cmd_set_port_settings_ethtool_prep
,
1389 ©_ecmd
, NULL
, NULL
);
1392 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port
*rocker_port
,
1393 unsigned char *macaddr
)
1395 return rocker_cmd_exec(rocker_port
, false,
1396 rocker_cmd_set_port_settings_macaddr_prep
,
1397 macaddr
, NULL
, NULL
);
1400 static int rocker_cmd_set_port_settings_mtu(struct rocker_port
*rocker_port
,
1403 return rocker_cmd_exec(rocker_port
, false,
1404 rocker_cmd_set_port_settings_mtu_prep
,
1408 int rocker_port_set_learning(struct rocker_port
*rocker_port
,
1411 return rocker_cmd_exec(rocker_port
, false,
1412 rocker_cmd_set_port_learning_prep
,
1413 &learning
, NULL
, NULL
);
1416 /**********************
1417 * Worlds manipulation
1418 **********************/
1420 static struct rocker_world_ops
*rocker_world_ops
[] = {
1424 #define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)
1426 static struct rocker_world_ops
*rocker_world_ops_find(u8 mode
)
1430 for (i
= 0; i
< ROCKER_WORLD_OPS_LEN
; i
++)
1431 if (rocker_world_ops
[i
]->mode
== mode
)
1432 return rocker_world_ops
[i
];
1436 static int rocker_world_init(struct rocker
*rocker
, u8 mode
)
1438 struct rocker_world_ops
*wops
;
1441 wops
= rocker_world_ops_find(mode
);
1443 dev_err(&rocker
->pdev
->dev
, "port mode \"%d\" is not supported\n",
1447 rocker
->wops
= wops
;
1448 rocker
->wpriv
= kzalloc(wops
->priv_size
, GFP_KERNEL
);
1453 err
= wops
->init(rocker
);
1455 kfree(rocker
->wpriv
);
1459 static void rocker_world_fini(struct rocker
*rocker
)
1461 struct rocker_world_ops
*wops
= rocker
->wops
;
1463 if (!wops
|| !wops
->fini
)
1466 kfree(rocker
->wpriv
);
1469 static int rocker_world_check_init(struct rocker_port
*rocker_port
)
1471 struct rocker
*rocker
= rocker_port
->rocker
;
1475 err
= rocker_cmd_get_port_settings_mode(rocker_port
, &mode
);
1477 dev_err(&rocker
->pdev
->dev
, "failed to get port mode\n");
1481 if (rocker
->wops
->mode
!= mode
) {
1482 dev_err(&rocker
->pdev
->dev
, "hardware has ports in different worlds, which is not supported\n");
1487 return rocker_world_init(rocker
, mode
);
1490 static int rocker_world_port_pre_init(struct rocker_port
*rocker_port
)
1492 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1495 rocker_port
->wpriv
= kzalloc(wops
->port_priv_size
, GFP_KERNEL
);
1496 if (!rocker_port
->wpriv
)
1498 if (!wops
->port_pre_init
)
1500 err
= wops
->port_pre_init(rocker_port
);
1502 kfree(rocker_port
->wpriv
);
1506 static int rocker_world_port_init(struct rocker_port
*rocker_port
)
1508 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1510 if (!wops
->port_init
)
1512 return wops
->port_init(rocker_port
);
1515 static void rocker_world_port_fini(struct rocker_port
*rocker_port
)
1517 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1519 if (!wops
->port_fini
)
1521 wops
->port_fini(rocker_port
);
1524 static void rocker_world_port_post_fini(struct rocker_port
*rocker_port
)
1526 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1528 if (!wops
->port_post_fini
)
1530 wops
->port_post_fini(rocker_port
);
1531 kfree(rocker_port
->wpriv
);
1534 static int rocker_world_port_open(struct rocker_port
*rocker_port
)
1536 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1538 if (!wops
->port_open
)
1540 return wops
->port_open(rocker_port
);
1543 static void rocker_world_port_stop(struct rocker_port
*rocker_port
)
1545 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1547 if (!wops
->port_stop
)
1549 wops
->port_stop(rocker_port
);
1552 static int rocker_world_port_attr_stp_state_set(struct rocker_port
*rocker_port
,
1554 struct switchdev_trans
*trans
)
1556 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1558 if (!wops
->port_attr_stp_state_set
)
1561 if (switchdev_trans_ph_prepare(trans
))
1564 return wops
->port_attr_stp_state_set(rocker_port
, state
);
1568 rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port
*
1571 p_brport_flags_support
)
1573 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1575 if (!wops
->port_attr_bridge_flags_support_get
)
1577 return wops
->port_attr_bridge_flags_support_get(rocker_port
,
1578 p_brport_flags_support
);
1582 rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port
*rocker_port
,
1583 unsigned long brport_flags
,
1584 struct switchdev_trans
*trans
)
1586 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1587 unsigned long brport_flags_s
;
1590 if (!wops
->port_attr_bridge_flags_set
)
1593 err
= rocker_world_port_attr_bridge_flags_support_get(rocker_port
,
1598 if (brport_flags
& ~brport_flags_s
)
1605 rocker_world_port_attr_bridge_flags_set(struct rocker_port
*rocker_port
,
1606 unsigned long brport_flags
,
1607 struct switchdev_trans
*trans
)
1609 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1611 if (!wops
->port_attr_bridge_flags_set
)
1614 if (switchdev_trans_ph_prepare(trans
))
1617 return wops
->port_attr_bridge_flags_set(rocker_port
, brport_flags
,
1622 rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port
*rocker_port
,
1624 struct switchdev_trans
*trans
)
1627 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1629 if (!wops
->port_attr_bridge_ageing_time_set
)
1632 if (switchdev_trans_ph_prepare(trans
))
1635 return wops
->port_attr_bridge_ageing_time_set(rocker_port
, ageing_time
,
1640 rocker_world_port_obj_vlan_add(struct rocker_port
*rocker_port
,
1641 const struct switchdev_obj_port_vlan
*vlan
,
1642 struct switchdev_trans
*trans
)
1644 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1646 if (!wops
->port_obj_vlan_add
)
1649 if (switchdev_trans_ph_prepare(trans
))
1652 return wops
->port_obj_vlan_add(rocker_port
, vlan
);
1656 rocker_world_port_obj_vlan_del(struct rocker_port
*rocker_port
,
1657 const struct switchdev_obj_port_vlan
*vlan
)
1659 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1661 if (netif_is_bridge_master(vlan
->obj
.orig_dev
))
1664 if (!wops
->port_obj_vlan_del
)
1666 return wops
->port_obj_vlan_del(rocker_port
, vlan
);
1670 rocker_world_port_fdb_add(struct rocker_port
*rocker_port
,
1671 struct switchdev_notifier_fdb_info
*info
)
1673 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1675 if (!wops
->port_obj_fdb_add
)
1678 return wops
->port_obj_fdb_add(rocker_port
, info
->vid
, info
->addr
);
1682 rocker_world_port_fdb_del(struct rocker_port
*rocker_port
,
1683 struct switchdev_notifier_fdb_info
*info
)
1685 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1687 if (!wops
->port_obj_fdb_del
)
1689 return wops
->port_obj_fdb_del(rocker_port
, info
->vid
, info
->addr
);
1692 static int rocker_world_port_master_linked(struct rocker_port
*rocker_port
,
1693 struct net_device
*master
)
1695 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1697 if (!wops
->port_master_linked
)
1699 return wops
->port_master_linked(rocker_port
, master
);
1702 static int rocker_world_port_master_unlinked(struct rocker_port
*rocker_port
,
1703 struct net_device
*master
)
1705 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1707 if (!wops
->port_master_unlinked
)
1709 return wops
->port_master_unlinked(rocker_port
, master
);
1712 static int rocker_world_port_neigh_update(struct rocker_port
*rocker_port
,
1713 struct neighbour
*n
)
1715 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1717 if (!wops
->port_neigh_update
)
1719 return wops
->port_neigh_update(rocker_port
, n
);
1722 static int rocker_world_port_neigh_destroy(struct rocker_port
*rocker_port
,
1723 struct neighbour
*n
)
1725 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1727 if (!wops
->port_neigh_destroy
)
1729 return wops
->port_neigh_destroy(rocker_port
, n
);
1732 static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port
*rocker_port
,
1733 const unsigned char *addr
,
1736 struct rocker_world_ops
*wops
= rocker_port
->rocker
->wops
;
1738 if (!wops
->port_ev_mac_vlan_seen
)
1740 return wops
->port_ev_mac_vlan_seen(rocker_port
, addr
, vlan_id
);
1743 static int rocker_world_fib4_add(struct rocker
*rocker
,
1744 const struct fib_entry_notifier_info
*fen_info
)
1746 struct rocker_world_ops
*wops
= rocker
->wops
;
1748 if (!wops
->fib4_add
)
1750 return wops
->fib4_add(rocker
, fen_info
);
1753 static int rocker_world_fib4_del(struct rocker
*rocker
,
1754 const struct fib_entry_notifier_info
*fen_info
)
1756 struct rocker_world_ops
*wops
= rocker
->wops
;
1758 if (!wops
->fib4_del
)
1760 return wops
->fib4_del(rocker
, fen_info
);
1763 static void rocker_world_fib4_abort(struct rocker
*rocker
)
1765 struct rocker_world_ops
*wops
= rocker
->wops
;
1767 if (wops
->fib4_abort
)
1768 wops
->fib4_abort(rocker
);
1775 static int rocker_port_open(struct net_device
*dev
)
1777 struct rocker_port
*rocker_port
= netdev_priv(dev
);
1780 err
= rocker_port_dma_rings_init(rocker_port
);
1784 err
= request_irq(rocker_msix_tx_vector(rocker_port
),
1785 rocker_tx_irq_handler
, 0,
1786 rocker_driver_name
, rocker_port
);
1788 netdev_err(rocker_port
->dev
, "cannot assign tx irq\n");
1789 goto err_request_tx_irq
;
1792 err
= request_irq(rocker_msix_rx_vector(rocker_port
),
1793 rocker_rx_irq_handler
, 0,
1794 rocker_driver_name
, rocker_port
);
1796 netdev_err(rocker_port
->dev
, "cannot assign rx irq\n");
1797 goto err_request_rx_irq
;
1800 err
= rocker_world_port_open(rocker_port
);
1802 netdev_err(rocker_port
->dev
, "cannot open port in world\n");
1803 goto err_world_port_open
;
1806 napi_enable(&rocker_port
->napi_tx
);
1807 napi_enable(&rocker_port
->napi_rx
);
1808 if (!dev
->proto_down
)
1809 rocker_port_set_enable(rocker_port
, true);
1810 netif_start_queue(dev
);
1813 err_world_port_open
:
1814 free_irq(rocker_msix_rx_vector(rocker_port
), rocker_port
);
1816 free_irq(rocker_msix_tx_vector(rocker_port
), rocker_port
);
1818 rocker_port_dma_rings_fini(rocker_port
);
1822 static int rocker_port_stop(struct net_device
*dev
)
1824 struct rocker_port
*rocker_port
= netdev_priv(dev
);
1826 netif_stop_queue(dev
);
1827 rocker_port_set_enable(rocker_port
, false);
1828 napi_disable(&rocker_port
->napi_rx
);
1829 napi_disable(&rocker_port
->napi_tx
);
1830 rocker_world_port_stop(rocker_port
);
1831 free_irq(rocker_msix_rx_vector(rocker_port
), rocker_port
);
1832 free_irq(rocker_msix_tx_vector(rocker_port
), rocker_port
);
1833 rocker_port_dma_rings_fini(rocker_port
);
1838 static void rocker_tx_desc_frags_unmap(const struct rocker_port
*rocker_port
,
1839 const struct rocker_desc_info
*desc_info
)
1841 const struct rocker
*rocker
= rocker_port
->rocker
;
1842 struct pci_dev
*pdev
= rocker
->pdev
;
1843 const struct rocker_tlv
*attrs
[ROCKER_TLV_TX_MAX
+ 1];
1844 struct rocker_tlv
*attr
;
1847 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_TX_MAX
, desc_info
);
1848 if (!attrs
[ROCKER_TLV_TX_FRAGS
])
1850 rocker_tlv_for_each_nested(attr
, attrs
[ROCKER_TLV_TX_FRAGS
], rem
) {
1851 const struct rocker_tlv
*frag_attrs
[ROCKER_TLV_TX_FRAG_ATTR_MAX
+ 1];
1852 dma_addr_t dma_handle
;
1855 if (rocker_tlv_type(attr
) != ROCKER_TLV_TX_FRAG
)
1857 rocker_tlv_parse_nested(frag_attrs
, ROCKER_TLV_TX_FRAG_ATTR_MAX
,
1859 if (!frag_attrs
[ROCKER_TLV_TX_FRAG_ATTR_ADDR
] ||
1860 !frag_attrs
[ROCKER_TLV_TX_FRAG_ATTR_LEN
])
1862 dma_handle
= rocker_tlv_get_u64(frag_attrs
[ROCKER_TLV_TX_FRAG_ATTR_ADDR
]);
1863 len
= rocker_tlv_get_u16(frag_attrs
[ROCKER_TLV_TX_FRAG_ATTR_LEN
]);
1864 dma_unmap_single(&pdev
->dev
, dma_handle
, len
, DMA_TO_DEVICE
);
1868 static int rocker_tx_desc_frag_map_put(const struct rocker_port
*rocker_port
,
1869 struct rocker_desc_info
*desc_info
,
1870 char *buf
, size_t buf_len
)
1872 const struct rocker
*rocker
= rocker_port
->rocker
;
1873 struct pci_dev
*pdev
= rocker
->pdev
;
1874 dma_addr_t dma_handle
;
1875 struct rocker_tlv
*frag
;
1877 dma_handle
= dma_map_single(&pdev
->dev
, buf
, buf_len
, DMA_TO_DEVICE
);
1878 if (unlikely(dma_mapping_error(&pdev
->dev
, dma_handle
))) {
1879 if (net_ratelimit())
1880 netdev_err(rocker_port
->dev
, "failed to dma map tx frag\n");
1883 frag
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_TX_FRAG
);
1886 if (rocker_tlv_put_u64(desc_info
, ROCKER_TLV_TX_FRAG_ATTR_ADDR
,
1889 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_TX_FRAG_ATTR_LEN
,
1892 rocker_tlv_nest_end(desc_info
, frag
);
1896 rocker_tlv_nest_cancel(desc_info
, frag
);
1898 dma_unmap_single(&pdev
->dev
, dma_handle
, buf_len
, DMA_TO_DEVICE
);
1902 static netdev_tx_t
rocker_port_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1904 struct rocker_port
*rocker_port
= netdev_priv(dev
);
1905 struct rocker
*rocker
= rocker_port
->rocker
;
1906 struct rocker_desc_info
*desc_info
;
1907 struct rocker_tlv
*frags
;
1911 desc_info
= rocker_desc_head_get(&rocker_port
->tx_ring
);
1912 if (unlikely(!desc_info
)) {
1913 if (net_ratelimit())
1914 netdev_err(dev
, "tx ring full when queue awake\n");
1915 return NETDEV_TX_BUSY
;
1918 rocker_desc_cookie_ptr_set(desc_info
, skb
);
1920 frags
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_TX_FRAGS
);
1923 err
= rocker_tx_desc_frag_map_put(rocker_port
, desc_info
,
1924 skb
->data
, skb_headlen(skb
));
1927 if (skb_shinfo(skb
)->nr_frags
> ROCKER_TX_FRAGS_MAX
) {
1928 err
= skb_linearize(skb
);
1933 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
1934 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
1936 err
= rocker_tx_desc_frag_map_put(rocker_port
, desc_info
,
1937 skb_frag_address(frag
),
1938 skb_frag_size(frag
));
1942 rocker_tlv_nest_end(desc_info
, frags
);
1944 rocker_desc_gen_clear(desc_info
);
1945 rocker_desc_head_set(rocker
, &rocker_port
->tx_ring
, desc_info
);
1947 desc_info
= rocker_desc_head_get(&rocker_port
->tx_ring
);
1949 netif_stop_queue(dev
);
1951 return NETDEV_TX_OK
;
1954 rocker_tx_desc_frags_unmap(rocker_port
, desc_info
);
1956 rocker_tlv_nest_cancel(desc_info
, frags
);
1959 dev
->stats
.tx_dropped
++;
1961 return NETDEV_TX_OK
;
1964 static int rocker_port_set_mac_address(struct net_device
*dev
, void *p
)
1966 struct sockaddr
*addr
= p
;
1967 struct rocker_port
*rocker_port
= netdev_priv(dev
);
1970 if (!is_valid_ether_addr(addr
->sa_data
))
1971 return -EADDRNOTAVAIL
;
1973 err
= rocker_cmd_set_port_settings_macaddr(rocker_port
, addr
->sa_data
);
1976 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
1980 static int rocker_port_change_mtu(struct net_device
*dev
, int new_mtu
)
1982 struct rocker_port
*rocker_port
= netdev_priv(dev
);
1983 int running
= netif_running(dev
);
1987 rocker_port_stop(dev
);
1989 netdev_info(dev
, "MTU change from %d to %d\n", dev
->mtu
, new_mtu
);
1992 err
= rocker_cmd_set_port_settings_mtu(rocker_port
, new_mtu
);
1997 err
= rocker_port_open(dev
);
2002 static int rocker_port_get_phys_port_name(struct net_device
*dev
,
2003 char *buf
, size_t len
)
2005 struct rocker_port
*rocker_port
= netdev_priv(dev
);
2006 struct port_name name
= { .buf
= buf
, .len
= len
};
2009 err
= rocker_cmd_exec(rocker_port
, false,
2010 rocker_cmd_get_port_settings_prep
, NULL
,
2011 rocker_cmd_get_port_settings_phys_name_proc
,
2014 return err
? -EOPNOTSUPP
: 0;
2017 static int rocker_port_change_proto_down(struct net_device
*dev
,
2020 struct rocker_port
*rocker_port
= netdev_priv(dev
);
2022 if (rocker_port
->dev
->flags
& IFF_UP
)
2023 rocker_port_set_enable(rocker_port
, !proto_down
);
2024 rocker_port
->dev
->proto_down
= proto_down
;
2028 static void rocker_port_neigh_destroy(struct net_device
*dev
,
2029 struct neighbour
*n
)
2031 struct rocker_port
*rocker_port
= netdev_priv(n
->dev
);
2034 err
= rocker_world_port_neigh_destroy(rocker_port
, n
);
2036 netdev_warn(rocker_port
->dev
, "failed to handle neigh destroy (err %d)\n",
2040 static int rocker_port_get_port_parent_id(struct net_device
*dev
,
2041 struct netdev_phys_item_id
*ppid
)
2043 const struct rocker_port
*rocker_port
= netdev_priv(dev
);
2044 const struct rocker
*rocker
= rocker_port
->rocker
;
2046 ppid
->id_len
= sizeof(rocker
->hw
.id
);
2047 memcpy(&ppid
->id
, &rocker
->hw
.id
, ppid
->id_len
);
2052 static const struct net_device_ops rocker_port_netdev_ops
= {
2053 .ndo_open
= rocker_port_open
,
2054 .ndo_stop
= rocker_port_stop
,
2055 .ndo_start_xmit
= rocker_port_xmit
,
2056 .ndo_set_mac_address
= rocker_port_set_mac_address
,
2057 .ndo_change_mtu
= rocker_port_change_mtu
,
2058 .ndo_get_phys_port_name
= rocker_port_get_phys_port_name
,
2059 .ndo_change_proto_down
= rocker_port_change_proto_down
,
2060 .ndo_neigh_destroy
= rocker_port_neigh_destroy
,
2061 .ndo_get_port_parent_id
= rocker_port_get_port_parent_id
,
2064 /********************
2066 ********************/
2068 static int rocker_port_attr_set(struct net_device
*dev
,
2069 const struct switchdev_attr
*attr
,
2070 struct switchdev_trans
*trans
)
2072 struct rocker_port
*rocker_port
= netdev_priv(dev
);
2076 case SWITCHDEV_ATTR_ID_PORT_STP_STATE
:
2077 err
= rocker_world_port_attr_stp_state_set(rocker_port
,
2081 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS
:
2082 err
= rocker_world_port_attr_pre_bridge_flags_set(rocker_port
,
2083 attr
->u
.brport_flags
,
2086 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS
:
2087 err
= rocker_world_port_attr_bridge_flags_set(rocker_port
,
2088 attr
->u
.brport_flags
,
2091 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME
:
2092 err
= rocker_world_port_attr_bridge_ageing_time_set(rocker_port
,
2093 attr
->u
.ageing_time
,
2104 static int rocker_port_obj_add(struct net_device
*dev
,
2105 const struct switchdev_obj
*obj
,
2106 struct switchdev_trans
*trans
)
2108 struct rocker_port
*rocker_port
= netdev_priv(dev
);
2112 case SWITCHDEV_OBJ_ID_PORT_VLAN
:
2113 err
= rocker_world_port_obj_vlan_add(rocker_port
,
2114 SWITCHDEV_OBJ_PORT_VLAN(obj
),
2125 static int rocker_port_obj_del(struct net_device
*dev
,
2126 const struct switchdev_obj
*obj
)
2128 struct rocker_port
*rocker_port
= netdev_priv(dev
);
2132 case SWITCHDEV_OBJ_ID_PORT_VLAN
:
2133 err
= rocker_world_port_obj_vlan_del(rocker_port
,
2134 SWITCHDEV_OBJ_PORT_VLAN(obj
));
2144 struct rocker_fib_event_work
{
2145 struct work_struct work
;
2147 struct fib_entry_notifier_info fen_info
;
2148 struct fib_rule_notifier_info fr_info
;
2150 struct rocker
*rocker
;
2151 unsigned long event
;
2154 static void rocker_router_fib_event_work(struct work_struct
*work
)
2156 struct rocker_fib_event_work
*fib_work
=
2157 container_of(work
, struct rocker_fib_event_work
, work
);
2158 struct rocker
*rocker
= fib_work
->rocker
;
2159 struct fib_rule
*rule
;
2162 /* Protect internal structures from changes */
2164 switch (fib_work
->event
) {
2165 case FIB_EVENT_ENTRY_REPLACE
:
2166 err
= rocker_world_fib4_add(rocker
, &fib_work
->fen_info
);
2168 rocker_world_fib4_abort(rocker
);
2169 fib_info_put(fib_work
->fen_info
.fi
);
2171 case FIB_EVENT_ENTRY_DEL
:
2172 rocker_world_fib4_del(rocker
, &fib_work
->fen_info
);
2173 fib_info_put(fib_work
->fen_info
.fi
);
2175 case FIB_EVENT_RULE_ADD
:
2176 case FIB_EVENT_RULE_DEL
:
2177 rule
= fib_work
->fr_info
.rule
;
2178 if (!fib4_rule_default(rule
))
2179 rocker_world_fib4_abort(rocker
);
2187 /* Called with rcu_read_lock() */
2188 static int rocker_router_fib_event(struct notifier_block
*nb
,
2189 unsigned long event
, void *ptr
)
2191 struct rocker
*rocker
= container_of(nb
, struct rocker
, fib_nb
);
2192 struct rocker_fib_event_work
*fib_work
;
2193 struct fib_notifier_info
*info
= ptr
;
2195 if (info
->family
!= AF_INET
)
2198 fib_work
= kzalloc(sizeof(*fib_work
), GFP_ATOMIC
);
2199 if (WARN_ON(!fib_work
))
2202 INIT_WORK(&fib_work
->work
, rocker_router_fib_event_work
);
2203 fib_work
->rocker
= rocker
;
2204 fib_work
->event
= event
;
2207 case FIB_EVENT_ENTRY_REPLACE
:
2208 case FIB_EVENT_ENTRY_DEL
:
2209 if (info
->family
== AF_INET
) {
2210 struct fib_entry_notifier_info
*fen_info
= ptr
;
2212 if (fen_info
->fi
->fib_nh_is_v6
) {
2213 NL_SET_ERR_MSG_MOD(info
->extack
, "IPv6 gateway with IPv4 route is not supported");
2215 return notifier_from_errno(-EINVAL
);
2217 if (fen_info
->fi
->nh
) {
2218 NL_SET_ERR_MSG_MOD(info
->extack
, "IPv4 route with nexthop objects is not supported");
2220 return notifier_from_errno(-EINVAL
);
2224 memcpy(&fib_work
->fen_info
, ptr
, sizeof(fib_work
->fen_info
));
2225 /* Take referece on fib_info to prevent it from being
2226 * freed while work is queued. Release it afterwards.
2228 fib_info_hold(fib_work
->fen_info
.fi
);
2230 case FIB_EVENT_RULE_ADD
:
2231 case FIB_EVENT_RULE_DEL
:
2232 memcpy(&fib_work
->fr_info
, ptr
, sizeof(fib_work
->fr_info
));
2233 fib_rule_get(fib_work
->fr_info
.rule
);
2237 queue_work(rocker
->rocker_owq
, &fib_work
->work
);
2242 /********************
2244 ********************/
2247 rocker_port_get_link_ksettings(struct net_device
*dev
,
2248 struct ethtool_link_ksettings
*ecmd
)
2250 struct rocker_port
*rocker_port
= netdev_priv(dev
);
2252 return rocker_cmd_get_port_settings_ethtool(rocker_port
, ecmd
);
2256 rocker_port_set_link_ksettings(struct net_device
*dev
,
2257 const struct ethtool_link_ksettings
*ecmd
)
2259 struct rocker_port
*rocker_port
= netdev_priv(dev
);
2261 return rocker_cmd_set_port_settings_ethtool(rocker_port
, ecmd
);
2264 static void rocker_port_get_drvinfo(struct net_device
*dev
,
2265 struct ethtool_drvinfo
*drvinfo
)
2267 strlcpy(drvinfo
->driver
, rocker_driver_name
, sizeof(drvinfo
->driver
));
2268 strlcpy(drvinfo
->version
, UTS_RELEASE
, sizeof(drvinfo
->version
));
2271 static struct rocker_port_stats
{
2272 char str
[ETH_GSTRING_LEN
];
2274 } rocker_port_stats
[] = {
2275 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS
, },
2276 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES
, },
2277 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED
, },
2278 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS
, },
2280 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS
, },
2281 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES
, },
2282 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED
, },
2283 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS
, },
2286 #define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
2288 static void rocker_port_get_strings(struct net_device
*netdev
, u32 stringset
,
2294 switch (stringset
) {
2296 for (i
= 0; i
< ARRAY_SIZE(rocker_port_stats
); i
++) {
2297 memcpy(p
, rocker_port_stats
[i
].str
, ETH_GSTRING_LEN
);
2298 p
+= ETH_GSTRING_LEN
;
2305 rocker_cmd_get_port_stats_prep(const struct rocker_port
*rocker_port
,
2306 struct rocker_desc_info
*desc_info
,
2309 struct rocker_tlv
*cmd_stats
;
2311 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
2312 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS
))
2315 cmd_stats
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
2319 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_STATS_PPORT
,
2320 rocker_port
->pport
))
2323 rocker_tlv_nest_end(desc_info
, cmd_stats
);
2329 rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port
*rocker_port
,
2330 const struct rocker_desc_info
*desc_info
,
2333 const struct rocker_tlv
*attrs
[ROCKER_TLV_CMD_MAX
+ 1];
2334 const struct rocker_tlv
*stats_attrs
[ROCKER_TLV_CMD_PORT_STATS_MAX
+ 1];
2335 const struct rocker_tlv
*pattr
;
2340 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_CMD_MAX
, desc_info
);
2342 if (!attrs
[ROCKER_TLV_CMD_INFO
])
2345 rocker_tlv_parse_nested(stats_attrs
, ROCKER_TLV_CMD_PORT_STATS_MAX
,
2346 attrs
[ROCKER_TLV_CMD_INFO
]);
2348 if (!stats_attrs
[ROCKER_TLV_CMD_PORT_STATS_PPORT
])
2351 pport
= rocker_tlv_get_u32(stats_attrs
[ROCKER_TLV_CMD_PORT_STATS_PPORT
]);
2352 if (pport
!= rocker_port
->pport
)
2355 for (i
= 0; i
< ARRAY_SIZE(rocker_port_stats
); i
++) {
2356 pattr
= stats_attrs
[rocker_port_stats
[i
].type
];
2360 data
[i
] = rocker_tlv_get_u64(pattr
);
2366 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port
*rocker_port
,
2369 return rocker_cmd_exec(rocker_port
, false,
2370 rocker_cmd_get_port_stats_prep
, NULL
,
2371 rocker_cmd_get_port_stats_ethtool_proc
,
2375 static void rocker_port_get_stats(struct net_device
*dev
,
2376 struct ethtool_stats
*stats
, u64
*data
)
2378 struct rocker_port
*rocker_port
= netdev_priv(dev
);
2380 if (rocker_cmd_get_port_stats_ethtool(rocker_port
, data
) != 0) {
2383 for (i
= 0; i
< ARRAY_SIZE(rocker_port_stats
); ++i
)
2388 static int rocker_port_get_sset_count(struct net_device
*netdev
, int sset
)
2392 return ROCKER_PORT_STATS_LEN
;
2398 static const struct ethtool_ops rocker_port_ethtool_ops
= {
2399 .get_drvinfo
= rocker_port_get_drvinfo
,
2400 .get_link
= ethtool_op_get_link
,
2401 .get_strings
= rocker_port_get_strings
,
2402 .get_ethtool_stats
= rocker_port_get_stats
,
2403 .get_sset_count
= rocker_port_get_sset_count
,
2404 .get_link_ksettings
= rocker_port_get_link_ksettings
,
2405 .set_link_ksettings
= rocker_port_set_link_ksettings
,
2412 static struct rocker_port
*rocker_port_napi_tx_get(struct napi_struct
*napi
)
2414 return container_of(napi
, struct rocker_port
, napi_tx
);
2417 static int rocker_port_poll_tx(struct napi_struct
*napi
, int budget
)
2419 struct rocker_port
*rocker_port
= rocker_port_napi_tx_get(napi
);
2420 const struct rocker
*rocker
= rocker_port
->rocker
;
2421 const struct rocker_desc_info
*desc_info
;
2425 /* Cleanup tx descriptors */
2426 while ((desc_info
= rocker_desc_tail_get(&rocker_port
->tx_ring
))) {
2427 struct sk_buff
*skb
;
2429 err
= rocker_desc_err(desc_info
);
2430 if (err
&& net_ratelimit())
2431 netdev_err(rocker_port
->dev
, "tx desc received with err %d\n",
2433 rocker_tx_desc_frags_unmap(rocker_port
, desc_info
);
2435 skb
= rocker_desc_cookie_ptr_get(desc_info
);
2437 rocker_port
->dev
->stats
.tx_packets
++;
2438 rocker_port
->dev
->stats
.tx_bytes
+= skb
->len
;
2440 rocker_port
->dev
->stats
.tx_errors
++;
2443 dev_kfree_skb_any(skb
);
2447 if (credits
&& netif_queue_stopped(rocker_port
->dev
))
2448 netif_wake_queue(rocker_port
->dev
);
2450 napi_complete(napi
);
2451 rocker_dma_ring_credits_set(rocker
, &rocker_port
->tx_ring
, credits
);
2456 static int rocker_port_rx_proc(const struct rocker
*rocker
,
2457 const struct rocker_port
*rocker_port
,
2458 struct rocker_desc_info
*desc_info
)
2460 const struct rocker_tlv
*attrs
[ROCKER_TLV_RX_MAX
+ 1];
2461 struct sk_buff
*skb
= rocker_desc_cookie_ptr_get(desc_info
);
2468 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_RX_MAX
, desc_info
);
2469 if (!attrs
[ROCKER_TLV_RX_FRAG_LEN
])
2471 if (attrs
[ROCKER_TLV_RX_FLAGS
])
2472 rx_flags
= rocker_tlv_get_u16(attrs
[ROCKER_TLV_RX_FLAGS
]);
2474 rocker_dma_rx_ring_skb_unmap(rocker
, attrs
);
2476 rx_len
= rocker_tlv_get_u16(attrs
[ROCKER_TLV_RX_FRAG_LEN
]);
2477 skb_put(skb
, rx_len
);
2478 skb
->protocol
= eth_type_trans(skb
, rocker_port
->dev
);
2480 if (rx_flags
& ROCKER_RX_FLAGS_FWD_OFFLOAD
)
2481 skb
->offload_fwd_mark
= 1;
2483 rocker_port
->dev
->stats
.rx_packets
++;
2484 rocker_port
->dev
->stats
.rx_bytes
+= skb
->len
;
2486 netif_receive_skb(skb
);
2488 return rocker_dma_rx_ring_skb_alloc(rocker_port
, desc_info
);
2491 static struct rocker_port
*rocker_port_napi_rx_get(struct napi_struct
*napi
)
2493 return container_of(napi
, struct rocker_port
, napi_rx
);
2496 static int rocker_port_poll_rx(struct napi_struct
*napi
, int budget
)
2498 struct rocker_port
*rocker_port
= rocker_port_napi_rx_get(napi
);
2499 const struct rocker
*rocker
= rocker_port
->rocker
;
2500 struct rocker_desc_info
*desc_info
;
2504 /* Process rx descriptors */
2505 while (credits
< budget
&&
2506 (desc_info
= rocker_desc_tail_get(&rocker_port
->rx_ring
))) {
2507 err
= rocker_desc_err(desc_info
);
2509 if (net_ratelimit())
2510 netdev_err(rocker_port
->dev
, "rx desc received with err %d\n",
2513 err
= rocker_port_rx_proc(rocker
, rocker_port
,
2515 if (err
&& net_ratelimit())
2516 netdev_err(rocker_port
->dev
, "rx processing failed with err %d\n",
2520 rocker_port
->dev
->stats
.rx_errors
++;
2522 rocker_desc_gen_clear(desc_info
);
2523 rocker_desc_head_set(rocker
, &rocker_port
->rx_ring
, desc_info
);
2527 if (credits
< budget
)
2528 napi_complete_done(napi
, credits
);
2530 rocker_dma_ring_credits_set(rocker
, &rocker_port
->rx_ring
, credits
);
2539 static void rocker_carrier_init(const struct rocker_port
*rocker_port
)
2541 const struct rocker
*rocker
= rocker_port
->rocker
;
2542 u64 link_status
= rocker_read64(rocker
, PORT_PHYS_LINK_STATUS
);
2545 link_up
= link_status
& (1 << rocker_port
->pport
);
2547 netif_carrier_on(rocker_port
->dev
);
2549 netif_carrier_off(rocker_port
->dev
);
2552 static void rocker_remove_ports(struct rocker
*rocker
)
2554 struct rocker_port
*rocker_port
;
2557 for (i
= 0; i
< rocker
->port_count
; i
++) {
2558 rocker_port
= rocker
->ports
[i
];
2561 rocker_world_port_fini(rocker_port
);
2562 unregister_netdev(rocker_port
->dev
);
2563 rocker_world_port_post_fini(rocker_port
);
2564 free_netdev(rocker_port
->dev
);
2566 rocker_world_fini(rocker
);
2567 kfree(rocker
->ports
);
2570 static void rocker_port_dev_addr_init(struct rocker_port
*rocker_port
)
2572 const struct rocker
*rocker
= rocker_port
->rocker
;
2573 const struct pci_dev
*pdev
= rocker
->pdev
;
2576 err
= rocker_cmd_get_port_settings_macaddr(rocker_port
,
2577 rocker_port
->dev
->dev_addr
);
2579 dev_warn(&pdev
->dev
, "failed to get mac address, using random\n");
2580 eth_hw_addr_random(rocker_port
->dev
);
#define ROCKER_PORT_MIN_MTU	ETH_MIN_MTU
#define ROCKER_PORT_MAX_MTU	9000

/* Allocate and register one port netdev.  port_number is 0-based; the
 * hardware physical port (pport) is 1-based, hence the +1.  On failure
 * the goto ladder unwinds exactly the steps already performed.
 */
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;

	err = rocker_world_check_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "world init failed\n");
		goto err_world_check_init;
	}

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
			  NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	/* MTU range: 68 - 9000 */
	dev->min_mtu = ROCKER_PORT_MIN_MTU;
	dev->max_mtu = ROCKER_PORT_MAX_MTU;

	err = rocker_world_port_pre_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world pre-init failed\n");
		goto err_world_port_pre_init;
	}
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	err = rocker_world_port_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world init failed\n");
		goto err_world_port_init;
	}

	return 0;

err_world_port_init:
	/* clear the slot so rocker_remove_ports() skips this port */
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	rocker_world_port_post_fini(rocker_port);
err_world_port_pre_init:
err_world_check_init:
	free_netdev(dev);
	return err;
}
2655 static int rocker_probe_ports(struct rocker
*rocker
)
2661 alloc_size
= sizeof(struct rocker_port
*) * rocker
->port_count
;
2662 rocker
->ports
= kzalloc(alloc_size
, GFP_KERNEL
);
2665 for (i
= 0; i
< rocker
->port_count
; i
++) {
2666 err
= rocker_probe_port(rocker
, i
);
2673 rocker_remove_ports(rocker
);
/* Enable MSI-X with exactly the vector count the device advertises.
 * The device must expose one vector per expected source (cmd, event,
 * plus tx/rx per port) — anything else is a mismatch and we refuse it.
 */
static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	/* _exact: all-or-nothing — no degraded vector allocation */
	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}
2711 static void rocker_msix_fini(const struct rocker
*rocker
)
2713 pci_disable_msix(rocker
->pdev
);
2714 kfree(rocker
->msix_entries
);
2717 static bool rocker_port_dev_check(const struct net_device
*dev
)
2719 return dev
->netdev_ops
== &rocker_port_netdev_ops
;
2723 rocker_switchdev_port_attr_set_event(struct net_device
*netdev
,
2724 struct switchdev_notifier_port_attr_info
*port_attr_info
)
2728 err
= rocker_port_attr_set(netdev
, port_attr_info
->attr
,
2729 port_attr_info
->trans
);
2731 port_attr_info
->handled
= true;
2732 return notifier_from_errno(err
);
/* Deferred-work context for switchdev FDB notifications: the notifier
 * runs in atomic context, so the FDB info is copied here and processed
 * later on the driver's ordered workqueue.
 */
struct rocker_switchdev_event_work {
	struct work_struct work;
	/* snapshot of the notifier payload; .addr is a separately
	 * kzalloc'd ETH_ALEN copy owned by this struct
	 */
	struct switchdev_notifier_fdb_info fdb_info;
	struct rocker_port *rocker_port;
	unsigned long event;		/* SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE */
};
2743 rocker_fdb_offload_notify(struct rocker_port
*rocker_port
,
2744 struct switchdev_notifier_fdb_info
*recv_info
)
2746 struct switchdev_notifier_fdb_info info
;
2748 info
.addr
= recv_info
->addr
;
2749 info
.vid
= recv_info
->vid
;
2750 info
.offloaded
= true;
2751 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED
,
2752 rocker_port
->dev
, &info
.info
, NULL
);
2755 static void rocker_switchdev_event_work(struct work_struct
*work
)
2757 struct rocker_switchdev_event_work
*switchdev_work
=
2758 container_of(work
, struct rocker_switchdev_event_work
, work
);
2759 struct rocker_port
*rocker_port
= switchdev_work
->rocker_port
;
2760 struct switchdev_notifier_fdb_info
*fdb_info
;
2764 switch (switchdev_work
->event
) {
2765 case SWITCHDEV_FDB_ADD_TO_DEVICE
:
2766 fdb_info
= &switchdev_work
->fdb_info
;
2767 if (!fdb_info
->added_by_user
)
2769 err
= rocker_world_port_fdb_add(rocker_port
, fdb_info
);
2771 netdev_dbg(rocker_port
->dev
, "fdb add failed err=%d\n", err
);
2774 rocker_fdb_offload_notify(rocker_port
, fdb_info
);
2776 case SWITCHDEV_FDB_DEL_TO_DEVICE
:
2777 fdb_info
= &switchdev_work
->fdb_info
;
2778 if (!fdb_info
->added_by_user
)
2780 err
= rocker_world_port_fdb_del(rocker_port
, fdb_info
);
2782 netdev_dbg(rocker_port
->dev
, "fdb add failed err=%d\n", err
);
2787 kfree(switchdev_work
->fdb_info
.addr
);
2788 kfree(switchdev_work
);
2789 dev_put(rocker_port
->dev
);
/* called under rcu_read_lock() */
static int rocker_switchdev_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct rocker_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	struct rocker_port *rocker_port;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	/* attr-set can be handled synchronously; FDB events below must
	 * be deferred to process context
	 */
	if (event == SWITCHDEV_PORT_ATTR_SET)
		return rocker_switchdev_port_attr_set_event(dev, ptr);

	rocker_port = netdev_priv(dev);
	/* atomic context (rcu_read_lock) — GFP_ATOMIC allocations only */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (WARN_ON(!switchdev_work))
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, rocker_switchdev_event_work);
	switchdev_work->rocker_port = rocker_port;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* snapshot the notifier payload; the MAC must be deep-
		 * copied because the original lives only for the call
		 */
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (unlikely(!switchdev_work->fdb_info.addr)) {
			kfree(switchdev_work);
			return NOTIFY_BAD;
		}

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the rocker device */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	/* ordered workqueue keeps FDB add/del processing in event order */
	queue_work(rocker_port->rocker->rocker_owq,
		   &switchdev_work->work);
	return NOTIFY_DONE;
}
2843 rocker_switchdev_port_obj_event(unsigned long event
, struct net_device
*netdev
,
2844 struct switchdev_notifier_port_obj_info
*port_obj_info
)
2846 int err
= -EOPNOTSUPP
;
2849 case SWITCHDEV_PORT_OBJ_ADD
:
2850 err
= rocker_port_obj_add(netdev
, port_obj_info
->obj
,
2851 port_obj_info
->trans
);
2853 case SWITCHDEV_PORT_OBJ_DEL
:
2854 err
= rocker_port_obj_del(netdev
, port_obj_info
->obj
);
2858 port_obj_info
->handled
= true;
2859 return notifier_from_errno(err
);
2862 static int rocker_switchdev_blocking_event(struct notifier_block
*unused
,
2863 unsigned long event
, void *ptr
)
2865 struct net_device
*dev
= switchdev_notifier_info_to_dev(ptr
);
2867 if (!rocker_port_dev_check(dev
))
2871 case SWITCHDEV_PORT_OBJ_ADD
:
2872 case SWITCHDEV_PORT_OBJ_DEL
:
2873 return rocker_switchdev_port_obj_event(event
, dev
, ptr
);
2874 case SWITCHDEV_PORT_ATTR_SET
:
2875 return rocker_switchdev_port_attr_set_event(dev
, ptr
);
/* Atomic-context switchdev notifier (FDB events, attr set). */
static struct notifier_block rocker_switchdev_notifier = {
	.notifier_call = rocker_switchdev_event,
};

/* Process-context switchdev notifier (port objects, attr set). */
static struct notifier_block rocker_switchdev_blocking_notifier = {
	.notifier_call = rocker_switchdev_blocking_event,
};
/* PCI probe: bring up the whole device — PCI/DMA setup, BAR0 mapping,
 * MSI-X, hardware self-test, DMA rings, IRQs, ordered workqueue, port
 * netdevs, and the FIB/switchdev notifiers.  The error ladder unwinds
 * each step in reverse order.
 */
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct notifier_block *nb;
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	/* prefer 64-bit DMA, fall back to 32-bit */
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "dma_set_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	/* port count comes from the device itself */
	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	/* ordered: switchdev FDB work items must run in event order */
	rocker->rocker_owq = alloc_ordered_workqueue(rocker_driver_name,
						     WQ_MEM_RECLAIM);
	if (!rocker->rocker_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	/* Only FIBs pointing to our own netdevs are programmed into
	 * the device, so no need to pass a callback.
	 */
	rocker->fib_nb.notifier_call = rocker_router_fib_event;
	err = register_fib_notifier(&init_net, &rocker->fib_nb, NULL, NULL);
	if (err)
		goto err_register_fib_notifier;

	err = register_switchdev_notifier(&rocker_switchdev_notifier);
	if (err) {
		dev_err(&pdev->dev, "Failed to register switchdev notifier\n");
		goto err_register_switchdev_notifier;
	}

	nb = &rocker_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err) {
		dev_err(&pdev->dev, "Failed to register switchdev blocking notifier\n");
		goto err_register_switchdev_blocking_notifier;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
		 (int)sizeof(rocker->hw.id), &rocker->hw.id);

	return 0;

err_register_switchdev_blocking_notifier:
	unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
	unregister_fib_notifier(&init_net, &rocker->fib_nb);
err_register_fib_notifier:
	rocker_remove_ports(rocker);
err_probe_ports:
	destroy_workqueue(rocker->rocker_owq);
err_alloc_ordered_workqueue:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}
/* PCI remove: tear everything down in the exact reverse order of
 * rocker_probe() — notifiers first (so no new work arrives), then
 * ports, a hardware reset, the workqueue, IRQs, rings, MSI-X, the BAR
 * mapping, and finally the PCI resources.
 */
static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);
	struct notifier_block *nb;

	nb = &rocker_switchdev_blocking_notifier;
	unregister_switchdev_blocking_notifier(nb);

	unregister_switchdev_notifier(&rocker_switchdev_notifier);
	unregister_fib_notifier(&init_net, &rocker->fib_nb);
	rocker_remove_ports(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	/* flushes any queued switchdev FDB work before freeing */
	destroy_workqueue(rocker->rocker_owq);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}
/* PCI driver glue; registered from rocker_module_init(). */
static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};
3081 /************************************
3082 * Net device notifier event handler
3083 ************************************/
3085 static bool rocker_port_dev_check_under(const struct net_device
*dev
,
3086 struct rocker
*rocker
)
3088 struct rocker_port
*rocker_port
;
3090 if (!rocker_port_dev_check(dev
))
3093 rocker_port
= netdev_priv(dev
);
3094 if (rocker_port
->rocker
!= rocker
)
/* Walk context for netdev_walk_all_lower_dev(): @rocker is the device
 * to match against (input), @port receives the first matching port
 * (output, NULL if none).
 */
struct rocker_walk_data {
	struct rocker *rocker;
	struct rocker_port *port;
};
3105 static int rocker_lower_dev_walk(struct net_device
*lower_dev
,
3106 struct netdev_nested_priv
*priv
)
3108 struct rocker_walk_data
*data
= (struct rocker_walk_data
*)priv
->data
;
3111 if (rocker_port_dev_check_under(lower_dev
, data
->rocker
)) {
3112 data
->port
= netdev_priv(lower_dev
);
/* Find the rocker port for @dev: either @dev itself, or the first
 * lower device (e.g. under a bridge/bond upper) that belongs to
 * @rocker.  Returns NULL if none is found.
 */
struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
					       struct rocker *rocker)
{
	struct netdev_nested_priv priv;
	struct rocker_walk_data data;

	if (rocker_port_dev_check_under(dev, rocker))
		return netdev_priv(dev);

	data.rocker = rocker;
	data.port = NULL;
	priv.data = (void *)&data;
	netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &priv);

	return data.port;
}
/* Netdevice notifier: reflect master (bridge/bond) link/unlink of a
 * rocker port into the world backend.  Failures are logged but never
 * veto the event.
 */
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		/* only master-type uppers are of interest */
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_world_port_master_linked(rocker_port,
							      info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
		} else {
			err = rocker_world_port_master_unlinked(rocker_port,
								info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
		}
	}
out:
	return NOTIFY_DONE;
}
/* Netdevice notifier; registered for the module's lifetime. */
static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};
3175 /************************************
3176 * Net event notifier event handler
3177 ************************************/
/* Net-event notifier: push IPv4 ARP neighbour updates on rocker ports
 * down to the world backend.  Failures are logged, never fatal.
 */
static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct rocker_port *rocker_port;
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		/* only IPv4 ARP neighbours are handled */
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		rocker_port = netdev_priv(dev);
		err = rocker_world_port_neigh_update(rocker_port, n);
		if (err)
			netdev_warn(dev, "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}
/* Net-event (neighbour) notifier; registered for the module's lifetime. */
static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};
3209 /***********************
3210 * Module init and exit
3211 ***********************/
/* Module init: the netdevice/netevent notifiers are registered before
 * the PCI driver so events are not missed during probe; on PCI
 * registration failure they are unregistered again.
 */
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}
3230 static void __exit
rocker_module_exit(void)
3232 unregister_netevent_notifier(&rocker_netevent_nb
);
3233 unregister_netdevice_notifier(&rocker_netdevice_nb
);
3234 pci_unregister_driver(&rocker_pci_driver
);
/* Module boilerplate: entry/exit points, license, authorship, and the
 * PCI ID table for automatic module loading.
 */
module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);