// SPDX-License-Identifier: GPL-2.0
/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/of_device.h>

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
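/* On platforms that lack native 64-bit MMIO accessors, the 64-bit register
 * access above is emulated with two 32-bit accesses: the low word lives at
 * the base offset and the high word at +4.  The two halves are not accessed
 * atomically, so readq() effectively computes
 *
 *	((u64) readl(reg)) | ((u64) readl(reg + 4) << 32)
 *
 * and callers depend on the hardware tolerating the split access.
 */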
static const struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
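/* The ({ ... }) statement-expression wrapper exists so BUILD_BUG_ON() can
 * reject bad constants at compile time: a non-positive poll count or a
 * negative delay fails the build instead of misbehaving at run time.  A
 * sketch of a call site (the register names here are illustrative, not
 * taken from this excerpt):
 *
 *	err = niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
 *					 XTXMAC_SW_RST_SOFT_RST,
 *					 1000, 100, "XTXMAC_SW_RST");
 */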
static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}
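/* Interrupt sources are modeled as logical device numbers (LDNs) grouped
 * into logical device groups (LDGs); parent->ldg_map[] records which group
 * each device belongs to.  niu_enable_interrupts() therefore walks every
 * LDG, unmasks the LDNs mapped to it via niu_ldn_irq_enable(), and finally
 * re-arms each group through niu_ldg_rearm().
 */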
static u32 phy_encode(u32 type, int port)
{
	return type << (port * 2);
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}
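/* Each port's PHY type occupies a 2-bit field at bit position (port * 2)
 * of the packed word.  Assuming PORT_TYPE_MASK is the 2-bit mask 0x3, a
 * worked example: phy_encode(0x2, 1) == 0x2 << 2 == 0x8, and
 * phy_decode(0x8, 1) == (0x8 >> 2) & 0x3 == 0x2.
 */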
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}
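/* mdio_read() and mdio_write() follow the clause-45 style two-frame
 * sequence: an address frame (MDIO_ADDR_OP) selects the register within
 * the MMD, then a separate read or write frame moves the data.  mdio_wait()
 * polls the turnaround bit and, on success, returns the 16-bit data field,
 * which is why its non-negative return value doubles as the read result.
 */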
static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}
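/* The SERDES PLL configuration words are 32 bits wide but each MDIO
 * transaction carries only 16 bits, so esr2_set_tx_cfg() and
 * esr2_set_rx_cfg() split every value across an ..._L (bits 15:0) and
 * ..._H (bits 31:16) register pair.
 */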
/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}
static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}
static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}
static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}
static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}
static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}
static int esr_reset(struct niu *np)
{
	u32 uninitialized_var(reset);
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}
static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}
static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}
static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}
static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}
static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}
static int link_status_mii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int err;
	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
	int supported, advertising, active_speed, active_duplex;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (unlikely(err < 0))
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (unlikely(err < 0))
		return err;
	bmsr = err;

	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
	if (unlikely(err < 0))
		return err;
	advert = err;

	err = mii_read(np, np->phy_addr, MII_LPA);
	if (unlikely(err < 0))
		return err;
	lpa = err;

	if (likely(bmsr & BMSR_ESTATEN)) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (unlikely(err < 0))
			return err;
		estatus = err;

		err = mii_read(np, np->phy_addr, MII_CTRL1000);
		if (unlikely(err < 0))
			return err;
		ctrl1000 = err;

		err = mii_read(np, np->phy_addr, MII_STAT1000);
		if (unlikely(err < 0))
			return err;
		stat1000 = err;
	} else
		estatus = ctrl1000 = stat1000 = 0;

	supported = 0;
	if (bmsr & BMSR_ANEGCAPABLE)
		supported |= SUPPORTED_Autoneg;
	if (bmsr & BMSR_10HALF)
		supported |= SUPPORTED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		supported |= SUPPORTED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
		supported |= SUPPORTED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (estatus & ESTATUS_1000_THALF)
		supported |= SUPPORTED_1000baseT_Half;
	if (estatus & ESTATUS_1000_TFULL)
		supported |= SUPPORTED_1000baseT_Full;
	lp->supported = supported;

	advertising = mii_adv_to_ethtool_adv_t(advert);
	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

	if (bmcr & BMCR_ANENABLE) {
		int neg, neg1000;

		lp->active_autoneg = 1;
		advertising |= ADVERTISED_Autoneg;

		neg = advert & lpa;
		neg1000 = (ctrl1000 << 2) & stat1000;

		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
			active_speed = SPEED_1000;
		else if (neg & LPA_100)
			active_speed = SPEED_100;
		else if (neg & (LPA_10HALF | LPA_10FULL))
			active_speed = SPEED_10;
		else
			active_speed = SPEED_INVALID;

		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
			active_duplex = DUPLEX_FULL;
		else if (active_speed != SPEED_INVALID)
			active_duplex = DUPLEX_HALF;
		else
			active_duplex = DUPLEX_INVALID;
	} else {
		lp->active_autoneg = 0;

		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
			active_speed = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			active_speed = SPEED_100;
		else
			active_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			active_duplex = DUPLEX_FULL;
		else
			active_duplex = DUPLEX_HALF;
	}

	lp->active_advertising = advertising;
	lp->active_speed = active_speed;
	lp->active_duplex = active_duplex;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}
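/* Autoneg resolution in link_status_mii(): the local 1000BASE-T
 * advertisement read from MII_CTRL1000 (ADVERTISE_1000FULL/HALF at bits
 * 9:8) is shifted left by 2 so that it lines up with the partner's
 * LPA_1000FULL/HALF bits (11:10) in MII_STAT1000; ANDing the two then
 * yields the highest commonly supported 1000BASE-T mode.
 */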
static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;

		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}
static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);

	err = link_status_mii(np, link_up_p);
	lp->supported |= SUPPORTED_TP;
	lp->active_advertising |= ADVERTISED_TP;

	spin_unlock_irqrestore(&np->lock, flags);
	return err;
}
static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0 || err == 0xffff)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(10);
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
			   np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}
/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}
static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}
static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}
static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}
static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}
static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info("Port %u cable not connected or bad cable\n",
				np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info("Port %u optical module is bad or missing\n",
				np->port);
		}
	}

	return 0;
}
static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}
static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}
static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}
static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return err;
}
static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
			   np->port, err);
		return -ENODEV;
	}

	return 0;
}
static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}
static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	if (lp->autoneg) {
		u16 ctrl1000;

		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
		if ((bmsr & BMSR_10HALF) &&
		    (lp->advertising & ADVERTISED_10baseT_Half))
			adv |= ADVERTISE_10HALF;
		if ((bmsr & BMSR_10FULL) &&
		    (lp->advertising & ADVERTISED_10baseT_Full))
			adv |= ADVERTISE_10FULL;
		if ((bmsr & BMSR_100HALF) &&
		    (lp->advertising & ADVERTISED_100baseT_Half))
			adv |= ADVERTISE_100HALF;
		if ((bmsr & BMSR_100FULL) &&
		    (lp->advertising & ADVERTISED_100baseT_Full))
			adv |= ADVERTISE_100FULL;
		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
		if (err)
			return err;

		if (likely(bmsr & BMSR_ESTATEN)) {
			ctrl1000 = 0;
			if ((estat & ESTATUS_1000_THALF) &&
			    (lp->advertising & ADVERTISED_1000baseT_Half))
				ctrl1000 |= ADVERTISE_1000HALF;
			if ((estat & ESTATUS_1000_TFULL) &&
			    (lp->advertising & ADVERTISED_1000baseT_Full))
				ctrl1000 |= ADVERTISE_1000FULL;
			err = mii_write(np, np->phy_addr,
					MII_CTRL1000, ctrl1000);
			if (err)
				return err;
		}

		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* !lp->autoneg */
		int fulldpx;

		if (lp->duplex == DUPLEX_FULL) {
			bmcr |= BMCR_FULLDPLX;
			fulldpx = 1;
		} else if (lp->duplex == DUPLEX_HALF)
			fulldpx = 0;
		else
			return -EINVAL;

		if (lp->speed == SPEED_1000) {
			/* if X-full requested while not supported, or
			   X-half requested while not supported... */
			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
			    (!fulldpx && !(estat & ESTATUS_1000_THALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED1000;
		} else if (lp->speed == SPEED_100) {
			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
			    (!fulldpx && !(bmsr & BMSR_100HALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED100;
		} else if (lp->speed == SPEED_10) {
			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
			    (!fulldpx && !(bmsr & BMSR_10HALF)))
				return -EINVAL;
		} else
			return -EINVAL;
	}

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);

	return 0;
}
static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}
static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}
static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
			   "10Mbit/sec",
			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		netif_warn(np, link, dev, "Link is down\n");
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}
static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}
static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0 || err == 0xffff)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}
static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}
static int link_status_10g(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		int phy_id;

		phy_id = phy_decode(np->parent->port_phy, np->port);
		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

		/* handle different phy types */
		switch (phy_id & NIU_PHY_ID_MASK) {
		case NIU_PHY_ID_MRVL88X2011:
			err = link_status_10g_mrvl(np, link_up_p);
			break;

		default: /* bcom 8704 */
			err = link_status_10g_bcom(np, link_up_p);
			break;
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}
static int niu_10g_phy_present(struct niu *np)
{
	u64 sig, mask, val;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return 0;
	}

	if ((sig & mask) != val)
		return 0;
	return 1;
}
static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = 0;
	int phy_present;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
			1 : 0;
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
			/* state change */
			if (phy_present) {
				/* A NEM was just plugged in */
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				if (err) {
					err = mdio_read(np, np->phy_addr,
						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
					if (err == 0xffff) {
						/* No mdio, back-to-back XAUI */
						goto out;
					}
					/* debounce */
					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				}
			} else {
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				*link_up_p = 0;
				netif_warn(np, link, np->dev,
					   "Hotplug PHY Removed\n");
			}
		}
out:
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
			err = link_status_10g_bcm8706(np, link_up_p);
			if (err == 0xffff) {
				/* No mdio, back-to-back XAUI: it is C10NEM */
				*link_up_p = 1;
				np->link_config.active_speed = SPEED_10000;
				np->link_config.active_duplex = DUPLEX_FULL;
			}
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}
static int niu_link_status(struct niu *np, int *link_up_p)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->link_status)
		err = ops->link_status(np, link_up_p);

	return err;
}

static void niu_timer(struct timer_list *t)
{
	struct niu *np = from_timer(np, t, timer);
	unsigned long off;
	int err, link_up;

	err = niu_link_status(np, &link_up);
	if (!err)
		niu_link_status_common(np, link_up);

	if (netif_carrier_ok(np->dev))
		off = 5 * HZ;
	else
		off = 1 * HZ;
	np->timer.expires = jiffies + off;

	add_timer(&np->timer);
}
static const struct niu_phy_ops phy_ops_10g_serdes = {
	.serdes_init		= serdes_init_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
	.serdes_init		= serdes_init_niu_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
	.serdes_init		= serdes_init_niu_1g_serdes,
	.link_status		= link_status_1g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_rgmii = {
	.xcvr_init		= xcvr_init_1g_rgmii,
	.link_status		= link_status_1g_rgmii,
};

static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
	.serdes_init		= serdes_init_niu_10g_fiber,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
	.serdes_init		= serdes_init_niu_10g_fiber,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

static const struct niu_phy_ops phy_ops_10g_copper = {
	.serdes_init		= serdes_init_10g,
	.link_status		= link_status_10g, /* XXX */
};

static const struct niu_phy_ops phy_ops_1g_fiber = {
	.serdes_init		= serdes_init_1g,
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};

static const struct niu_phy_ops phy_ops_1g_copper = {
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};
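/* Every link-layer variant funnels through this small ops vtable: probe
 * code selects one of the phy_ops_* instances above, and the generic
 * paths (niu_serdes_init(), niu_xcvr_init(), niu_link_status()) call
 * through whichever hooks are populated; a NULL hook simply means there
 * is nothing to do for that hardware flavor.
 */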
struct niu_phy_template {
	const struct niu_phy_ops	*ops;
	u32				phy_addr_base;
};

static const struct niu_phy_template phy_template_niu_10g_fiber = {
	.ops		= &phy_ops_10g_fiber_niu,
	.phy_addr_base	= 16,
};

static const struct niu_phy_template phy_template_niu_10g_serdes = {
	.ops		= &phy_ops_10g_serdes_niu,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_niu_1g_serdes = {
	.ops		= &phy_ops_1g_serdes_niu,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_fiber = {
	.ops		= &phy_ops_10g_fiber,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
	.ops		= &phy_ops_10g_fiber_hotplug,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_niu_10g_hotplug = {
	.ops		= &phy_ops_niu_10g_hotplug,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_copper = {
	.ops		= &phy_ops_10g_copper,
	.phy_addr_base	= 10,
};

static const struct niu_phy_template phy_template_1g_fiber = {
	.ops		= &phy_ops_1g_fiber,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_copper = {
	.ops		= &phy_ops_1g_copper,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_rgmii = {
	.ops		= &phy_ops_1g_rgmii,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_serdes = {
	.ops		= &phy_ops_10g_serdes,
	.phy_addr_base	= 0,
};
static int niu_atca_port_num[4] = {
	0, 0, 11, 10
};
static int serdes_init_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		err = serdes_init_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}

	return 0;
}
static int niu_determine_phy_disposition(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	u8 plat_type = parent->plat_type;
	const struct niu_phy_template *tp;
	u32 phy_addr_off = 0;

	if (plat_type == PLAT_TYPE_NIU) {
		switch (np->flags &
			(NIU_FLAGS_10G |
			 NIU_FLAGS_FIBER |
			 NIU_FLAGS_XCVR_SERDES)) {
		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
			/* 10G Serdes */
			tp = &phy_template_niu_10g_serdes;
			break;
		case NIU_FLAGS_XCVR_SERDES:
			/* 1G Serdes */
			tp = &phy_template_niu_1g_serdes;
			break;
		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
			/* 10G Fiber */
		default:
			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
				tp = &phy_template_niu_10g_hotplug;
				if (np->port == 0)
					phy_addr_off = 8;
				if (np->port == 1)
					phy_addr_off = 12;
			} else {
				tp = &phy_template_niu_10g_fiber;
				phy_addr_off += np->port;
			}
			break;
		}
	} else {
		switch (np->flags &
			(NIU_FLAGS_10G |
			 NIU_FLAGS_FIBER |
			 NIU_FLAGS_XCVR_SERDES)) {
		case 0:
			/* 1G copper */
			tp = &phy_template_1g_copper;
			if (plat_type == PLAT_TYPE_VF_P0)
				phy_addr_off = 10;
			else if (plat_type == PLAT_TYPE_VF_P1)
				phy_addr_off = 26;

			phy_addr_off += (np->port ^ 0x3);
			break;

		case NIU_FLAGS_10G:
			/* 10G copper */
			tp = &phy_template_10g_copper;
			break;

		case NIU_FLAGS_FIBER:
			/* 1G fiber */
			tp = &phy_template_1g_fiber;
			break;

		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
			/* 10G fiber */
			tp = &phy_template_10g_fiber;
			if (plat_type == PLAT_TYPE_VF_P0 ||
			    plat_type == PLAT_TYPE_VF_P1)
				phy_addr_off = 8;
			phy_addr_off += np->port;
			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
				tp = &phy_template_10g_fiber_hotplug;
				if (np->port == 0)
					phy_addr_off = 8;
				if (np->port == 1)
					phy_addr_off = 12;
			}
			break;

		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		case NIU_FLAGS_XCVR_SERDES:
			switch (np->port) {
			case 0:
			case 1:
				tp = &phy_template_10g_serdes;
				break;
			case 2:
			case 3:
				tp = &phy_template_1g_rgmii;
				break;
			default:
				return -EINVAL;
			}
			phy_addr_off = niu_atca_port_num[np->port];
			break;

		default:
			return -EINVAL;
		}
	}

	np->phy_ops = tp->ops;
	np->phy_addr = tp->phy_addr_base + phy_addr_off;

	return 0;
}
static int niu_init_link(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int err, ignore;

	if (parent->plat_type == PLAT_TYPE_NIU) {
		err = niu_xcvr_init(np);
		if (err)
			return err;
		msleep(200);
	}
	err = niu_serdes_init(np);
	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
		return err;
	msleep(200);
	err = niu_xcvr_init(np);
	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
		niu_link_status(np, &ignore);
	return 0;
}
static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
{
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ADDR0, reg0);
		nw64_mac(XMAC_ADDR1, reg1);
		nw64_mac(XMAC_ADDR2, reg2);
	} else {
		nw64_mac(BMAC_ADDR0, reg0);
		nw64_mac(BMAC_ADDR1, reg1);
		nw64_mac(BMAC_ADDR2, reg2);
	}
}
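/* The six MAC address octets are packed big-endian into three 16-bit
 * registers, lowest-order octets first.  Worked example for
 * 00:11:22:33:44:55: reg0 = 0x4455, reg1 = 0x2233, reg2 = 0x0011.
 */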
static int niu_num_alt_addr(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return XMAC_NUM_ALT_ADDR;
	else
		return BMAC_NUM_ALT_ADDR;
}

static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
{
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (index >= niu_num_alt_addr(np))
		return -EINVAL;

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
	} else {
		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
	}

	return 0;
}
static int niu_enable_alt_mac(struct niu *np, int index, int on)
{
	unsigned long reg;
	u64 val, mask;

	if (index >= niu_num_alt_addr(np))
		return -EINVAL;

	if (np->flags & NIU_FLAGS_XMAC) {
		reg = XMAC_ADDR_CMPEN;
		mask = 1 << index;
	} else {
		reg = BMAC_ADDR_CMPEN;
		mask = 1 << (index + 1);
	}

	val = nr64_mac(reg);
	if (on)
		val |= mask;
	else
		val &= ~mask;
	nw64_mac(reg, val);

	return 0;
}
static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
				   int num, int mac_pref)
{
	u64 val = nr64_mac(reg);
	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
	val |= num;
	if (mac_pref)
		val |= HOST_INFO_MPR;
	nw64_mac(reg, val);
}

static int __set_rdc_table_num(struct niu *np,
			       int xmac_index, int bmac_index,
			       int rdc_table_num, int mac_pref)
{
	unsigned long reg;

	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
		return -EINVAL;
	if (np->flags & NIU_FLAGS_XMAC)
		reg = XMAC_HOST_INFO(xmac_index);
	else
		reg = BMAC_HOST_INFO(bmac_index);
	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
	return 0;
}

static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
					 int mac_pref)
{
	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
}

static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
					   int mac_pref)
{
	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
}

static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
				     int table_num, int mac_pref)
{
	if (idx >= niu_num_alt_addr(np))
		return -EINVAL;
	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
}
static u64 vlan_entry_set_parity(u64 reg_val)
{
	u64 port01_mask;
	u64 port23_mask;

	port01_mask = 0x00ff;
	port23_mask = 0xff00;

	if (hweight64(reg_val & port01_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY0;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY0;

	if (hweight64(reg_val & port23_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY1;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY1;

	return reg_val;
}
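/* Each parity bit covers one half of the per-port VPR/RDC-table bits:
 * PARITY0 guards bits 7:0 (ports 0 and 1) and PARITY1 guards bits 15:8
 * (ports 2 and 3).  Worked example: if reg_val & 0x00ff == 0x03,
 * hweight64() counts two set bits, the population is even, and
 * ENET_VLAN_TBL_PARITY0 is cleared.
 */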
static void vlan_tbl_write(struct niu *np, unsigned long index,
			   int port, int vpr, int rdc_table)
{
	u64 reg_val = nr64(ENET_VLAN_TBL(index));

	reg_val &= ~((ENET_VLAN_TBL_VPR |
		      ENET_VLAN_TBL_VLANRDCTBLN) <<
		     ENET_VLAN_TBL_SHIFT(port));
	if (vpr)
		reg_val |= (ENET_VLAN_TBL_VPR <<
			    ENET_VLAN_TBL_SHIFT(port));
	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));

	reg_val = vlan_entry_set_parity(reg_val);

	nw64(ENET_VLAN_TBL(index), reg_val);
}

static void vlan_tbl_clear(struct niu *np)
{
	int i;

	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
		nw64(ENET_VLAN_TBL(i), 0);
}
static int tcam_wait_bit(struct niu *np, u64 bit)
{
	int limit = 1000;

	while (--limit > 0) {
		if (nr64(TCAM_CTL) & bit)
			break;
		udelay(1);
	}
	if (limit <= 0)
		return -ENODEV;

	return 0;
}

static int tcam_flush(struct niu *np, int index)
{
	nw64(TCAM_KEY_0, 0x00);
	nw64(TCAM_KEY_MASK_0, 0xff);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
static int tcam_read(struct niu *np, int index,
		     u64 *key, u64 *mask)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err) {
		key[0] = nr64(TCAM_KEY_0);
		key[1] = nr64(TCAM_KEY_1);
		key[2] = nr64(TCAM_KEY_2);
		key[3] = nr64(TCAM_KEY_3);
		mask[0] = nr64(TCAM_KEY_MASK_0);
		mask[1] = nr64(TCAM_KEY_MASK_1);
		mask[2] = nr64(TCAM_KEY_MASK_2);
		mask[3] = nr64(TCAM_KEY_MASK_3);
	}
	return err;
}

static int tcam_write(struct niu *np, int index,
		      u64 *key, u64 *mask)
{
	nw64(TCAM_KEY_0, key[0]);
	nw64(TCAM_KEY_1, key[1]);
	nw64(TCAM_KEY_2, key[2]);
	nw64(TCAM_KEY_3, key[3]);
	nw64(TCAM_KEY_MASK_0, mask[0]);
	nw64(TCAM_KEY_MASK_1, mask[1]);
	nw64(TCAM_KEY_MASK_2, mask[2]);
	nw64(TCAM_KEY_MASK_3, mask[3]);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
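/* A TCAM entry is staged as a 4 x 64-bit key plus a 4 x 64-bit mask in
 * the TCAM_KEY_* and TCAM_KEY_MASK_* registers; the single write of
 * TCAM_CTL_RWC_TCAM_WRITE | index to TCAM_CTL then commits the staged
 * entry, and tcam_wait_bit(np, TCAM_CTL_STAT) polls for completion.
 */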
static int tcam_assoc_read(struct niu *np, int index, u64 *data)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err)
		*data = nr64(TCAM_KEY_1);

	return err;
}

static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
{
	nw64(TCAM_KEY_1, assoc_data);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
2875 static void tcam_enable(struct niu
*np
, int on
)
2877 u64 val
= nr64(FFLP_CFG_1
);
2880 val
&= ~FFLP_CFG_1_TCAM_DIS
;
2882 val
|= FFLP_CFG_1_TCAM_DIS
;
2883 nw64(FFLP_CFG_1
, val
);
2886 static void tcam_set_lat_and_ratio(struct niu
*np
, u64 latency
, u64 ratio
)
2888 u64 val
= nr64(FFLP_CFG_1
);
2890 val
&= ~(FFLP_CFG_1_FFLPINITDONE
|
2892 FFLP_CFG_1_CAMRATIO
);
2893 val
|= (latency
<< FFLP_CFG_1_CAMLAT_SHIFT
);
2894 val
|= (ratio
<< FFLP_CFG_1_CAMRATIO_SHIFT
);
2895 nw64(FFLP_CFG_1
, val
);
2897 val
= nr64(FFLP_CFG_1
);
2898 val
|= FFLP_CFG_1_FFLPINITDONE
;
2899 nw64(FFLP_CFG_1
, val
);
2902 static int tcam_user_eth_class_enable(struct niu
*np
, unsigned long class,
2908 if (class < CLASS_CODE_ETHERTYPE1
||
2909 class > CLASS_CODE_ETHERTYPE2
)
2912 reg
= L2_CLS(class - CLASS_CODE_ETHERTYPE1
);
2924 static int tcam_user_eth_class_set(struct niu
*np
, unsigned long class,
2930 if (class < CLASS_CODE_ETHERTYPE1
||
2931 class > CLASS_CODE_ETHERTYPE2
||
2932 (ether_type
& ~(u64
)0xffff) != 0)
2935 reg
= L2_CLS(class - CLASS_CODE_ETHERTYPE1
);
2937 val
&= ~L2_CLS_ETYPE
;
2938 val
|= (ether_type
<< L2_CLS_ETYPE_SHIFT
);
2945 static int tcam_user_ip_class_enable(struct niu
*np
, unsigned long class,
2951 if (class < CLASS_CODE_USER_PROG1
||
2952 class > CLASS_CODE_USER_PROG4
)
2955 reg
= L3_CLS(class - CLASS_CODE_USER_PROG1
);
2958 val
|= L3_CLS_VALID
;
2960 val
&= ~L3_CLS_VALID
;
2966 static int tcam_user_ip_class_set(struct niu
*np
, unsigned long class,
2967 int ipv6
, u64 protocol_id
,
2968 u64 tos_mask
, u64 tos_val
)
2973 if (class < CLASS_CODE_USER_PROG1
||
2974 class > CLASS_CODE_USER_PROG4
||
2975 (protocol_id
& ~(u64
)0xff) != 0 ||
2976 (tos_mask
& ~(u64
)0xff) != 0 ||
2977 (tos_val
& ~(u64
)0xff) != 0)
2980 reg
= L3_CLS(class - CLASS_CODE_USER_PROG1
);
2982 val
&= ~(L3_CLS_IPVER
| L3_CLS_PID
|
2983 L3_CLS_TOSMASK
| L3_CLS_TOS
);
2985 val
|= L3_CLS_IPVER
;
2986 val
|= (protocol_id
<< L3_CLS_PID_SHIFT
);
2987 val
|= (tos_mask
<< L3_CLS_TOSMASK_SHIFT
);
2988 val
|= (tos_val
<< L3_CLS_TOS_SHIFT
);
2994 static int tcam_early_init(struct niu
*np
)
3000 tcam_set_lat_and_ratio(np
,
3001 DEFAULT_TCAM_LATENCY
,
3002 DEFAULT_TCAM_ACCESS_RATIO
);
3003 for (i
= CLASS_CODE_ETHERTYPE1
; i
<= CLASS_CODE_ETHERTYPE2
; i
++) {
3004 err
= tcam_user_eth_class_enable(np
, i
, 0);
3008 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_USER_PROG4
; i
++) {
3009 err
= tcam_user_ip_class_enable(np
, i
, 0);
3017 static int tcam_flush_all(struct niu
*np
)
3021 for (i
= 0; i
< np
->parent
->tcam_num_entries
; i
++) {
3022 int err
= tcam_flush(np
, i
);
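
/* FCRAM hash table accesses go through an address/data register pair
 * per partition.  Note that hash_addr_regval() sets the AUTOINC bit
 * only for single-entry transfers; for multi-entry transfers the
 * address evidently advances implicitly on each HASH_TBL_DATA access,
 * which is why the loops below touch only the data register.
 */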
static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
{
	return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
}

static int hash_read(struct niu *np, unsigned long partition,
		     unsigned long index, unsigned long num_entries,
		     u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + num_entries > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		data[i] = nr64(HASH_TBL_DATA(partition));

	return 0;
}

static int hash_write(struct niu *np, unsigned long partition,
		      unsigned long index, unsigned long num_entries,
		      u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + (num_entries * 8) > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		nw64(HASH_TBL_DATA(partition), data[i]);

	return 0;
}
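
/* FFLP bring-up sequence: reset the block, program the FCRAM access
 * ratio and refresh timers, and only then set FFLP_CFG_1_FFLPINITDONE.
 * The partition helper validates the 5-bit mask/base fields before
 * touching FLW_PRT_SEL.
 */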
static void fflp_reset(struct niu *np)
{
	u64 val;

	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
	udelay(10);
	nw64(FFLP_CFG_1, 0);

	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}

static void fflp_set_timings(struct niu *np)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~FFLP_CFG_1_FFLPINITDONE;
	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);

	val = nr64(FCRAM_REF_TMR);
	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
	nw64(FCRAM_REF_TMR, val);
}

static int fflp_set_partition(struct niu *np, u64 partition,
			      u64 mask, u64 base, int enable)
{
	unsigned long reg;
	u64 val;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    (mask & ~(u64)0x1f) != 0 ||
	    (base & ~(u64)0x1f) != 0)
		return -EINVAL;

	reg = FLW_PRT_SEL(partition);

	val = nr64(reg);
	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
	if (enable)
		val |= FLW_PRT_SEL_EXT;
	nw64(reg, val);

	return 0;
}

static int fflp_disable_all_partitions(struct niu *np)
{
	unsigned long i;

	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
		int err = fflp_set_partition(np, 0, 0, 0, 0);
		if (err)
			return err;
	}
	return 0;
}

static void fflp_llcsnap_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val |= FFLP_CFG_1_LLCSNAP;
	else
		val &= ~FFLP_CFG_1_LLCSNAP;
	nw64(FFLP_CFG_1, val);
}

static void fflp_errors_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val &= ~FFLP_CFG_1_ERRORDIS;
	else
		val |= FFLP_CFG_1_ERRORDIS;
	nw64(FFLP_CFG_1, val);
}

static int fflp_hash_clear(struct niu *np)
{
	struct fcram_hash_ipv4 ent;
	unsigned long i;

	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
	memset(&ent, 0, sizeof(ent));
	ent.header = HASH_HEADER_EXT;

	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
		if (err)
			return err;
	}
	return 0;
}

static int fflp_early_init(struct niu *np)
{
	struct niu_parent *parent;
	unsigned long flags;
	int err;

	niu_lock_parent(np, flags);

	parent = np->parent;
	err = 0;
	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			fflp_reset(np);
			fflp_set_timings(np);
			err = fflp_disable_all_partitions(np);
			if (err) {
				netif_printk(np, probe, KERN_DEBUG, np->dev,
					     "fflp_disable_all_partitions failed, err=%d\n",
					     err);
				goto out;
			}
		}

		err = tcam_early_init(np);
		if (err) {
			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "tcam_early_init failed, err=%d\n", err);
			goto out;
		}
		fflp_llcsnap_enable(np, 1);
		fflp_errors_enable(np, 0);
		nw64(H1POLY, 0);
		nw64(H2POLY, 0);

		err = tcam_flush_all(np);
		if (err) {
			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "tcam_flush_all failed, err=%d\n", err);
			goto out;
		}
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = fflp_hash_clear(np);
			if (err) {
				netif_printk(np, probe, KERN_DEBUG, np->dev,
					     "fflp_hash_clear failed, err=%d\n",
					     err);
				goto out;
			}
		}

		vlan_tbl_clear(np);

		parent->flags |= PARENT_FLGS_CLS_HWINIT;
	}
out:
	niu_unlock_parent(np, flags);
	return err;
}

static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
	return 0;
}

static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
	return 0;
}

/* Entries for the ports are interleaved in the TCAM */
static u16 tcam_get_index(struct niu *np, u16 idx)
{
	/* One entry reserved for IP fragment rule */
	if (idx >= (np->clas.tcam_sz - 1))
		idx = 0;
	return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
}

static u16 tcam_get_size(struct niu *np)
{
	/* One entry reserved for IP fragment rule */
	return np->clas.tcam_sz - 1;
}

static u16 tcam_get_valid_entry_cnt(struct niu *np)
{
	/* One entry reserved for IP fragment rule */
	return np->clas.tcam_valid_entries - 1;
}
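
/* RX buffer pages are tracked in a small hash table keyed by the DMA
 * base address of each page.  page->index holds that address and
 * page->mapping is reused as the hash chain link, so no separate
 * per-page bookkeeping structure is needed.
 */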
static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
			      u32 offset, u32 size, u32 truesize)
{
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);

	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}

static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
{
	a >>= PAGE_SHIFT;
	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));

	return a & (MAX_RBR_RING_SIZE - 1);
}

static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
				    struct page ***link)
{
	unsigned int h = niu_hash_rxaddr(rp, addr);
	struct page *p, **pp;

	addr &= PAGE_MASK;
	pp = &rp->rxhash[h];
	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
		if (p->index == addr) {
			*link = pp;
			return p;
		}
	}
	BUG();

	return NULL;
}

static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
	unsigned int h = niu_hash_rxaddr(rp, base);

	page->index = base;
	page->mapping = (struct address_space *) rp->rxhash[h];
	rp->rxhash[h] = page;
}

static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
			    gfp_t mask, int start_index)
{
	struct page *page;
	u64 addr;
	int i;

	page = alloc_page(mask);
	if (!page)
		return -ENOMEM;

	addr = np->ops->map_page(np->device, page, 0,
				 PAGE_SIZE, DMA_FROM_DEVICE);
	if (!addr) {
		__free_page(page);
		return -ENOMEM;
	}

	niu_hash_page(rp, page, addr);
	if (rp->rbr_blocks_per_page > 1)
		page_ref_add(page, rp->rbr_blocks_per_page - 1);

	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
		__le32 *rbr = &rp->rbr[start_index + i];

		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
		addr += rp->rbr_block_size;
	}

	return 0;
}

static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int index = rp->rbr_index;

	rp->rbr_pending++;
	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
		int err = niu_rbr_add_page(np, rp, mask, index);

		if (unlikely(err)) {
			rp->rbr_pending--;
			return;
		}

		rp->rbr_index += rp->rbr_blocks_per_page;
		BUG_ON(rp->rbr_index > rp->rbr_table_size);
		if (rp->rbr_index == rp->rbr_table_size)
			rp->rbr_index = 0;

		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
			rp->rbr_pending = 0;
		}
	}
}
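
/* One received frame may span several RCR entries; entries belonging
 * to the same frame are chained via the RCR_ENTRY_MULTI bit.  Both
 * consumers below walk the chain until MULTI is clear, releasing or
 * appending the underlying page fragments as they go.
 */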
static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
{
	unsigned int index = rp->rcr_index;
	int num_rcr = 0;

	rp->rx_dropped++;
	while (1) {
		struct page *page, **link;
		u64 addr, val;
		u32 rcr_size;

		num_rcr++;

		val = le64_to_cpup(&rp->rcr[index]);
		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
		page = niu_find_rxpage(rp, addr, &link);

		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
					 RCR_ENTRY_PKTBUFSZ_SHIFT];
		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
			*link = (struct page *) page->mapping;
			np->ops->unmap_page(np->device, page->index,
					    PAGE_SIZE, DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;
			__free_page(page);
			rp->rbr_refill_pending++;
		}

		index = NEXT_RCR(rp, index);
		if (!(val & RCR_ENTRY_MULTI))
			break;

	}
	rp->rcr_index = index;

	return num_rcr;
}

static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
			      struct rx_ring_info *rp)
{
	unsigned int index = rp->rcr_index;
	struct rx_pkt_hdr1 *rh;
	struct sk_buff *skb;
	int len, num_rcr;

	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb))
		return niu_rx_pkt_ignore(np, rp);

	num_rcr = 0;
	while (1) {
		struct page *page, **link;
		u32 rcr_size, append_size;
		u64 addr, val;
		u32 off;

		num_rcr++;

		val = le64_to_cpup(&rp->rcr[index]);

		len = (val & RCR_ENTRY_L2_LEN) >>
			RCR_ENTRY_L2_LEN_SHIFT;
		append_size = len + ETH_HLEN + ETH_FCS_LEN;

		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
		page = niu_find_rxpage(rp, addr, &link);

		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
					 RCR_ENTRY_PKTBUFSZ_SHIFT];

		off = addr & ~PAGE_MASK;
		if (num_rcr == 1) {
			int ptype;

			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
			if ((ptype == RCR_PKT_TYPE_TCP ||
			     ptype == RCR_PKT_TYPE_UDP) &&
			    !(val & (RCR_ENTRY_NOPORT |
				     RCR_ENTRY_ERROR)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb_checksum_none_assert(skb);
		} else if (!(val & RCR_ENTRY_MULTI))
			append_size = append_size - skb->len;

		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
			*link = (struct page *) page->mapping;
			np->ops->unmap_page(np->device, page->index,
					    PAGE_SIZE, DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;
			rp->rbr_refill_pending++;
		} else
			get_page(page);

		index = NEXT_RCR(rp, index);
		if (!(val & RCR_ENTRY_MULTI))
			break;

	}
	rp->rcr_index = index;

	len += sizeof(*rh);
	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
	__pskb_pull_tail(skb, len);

	rh = (struct rx_pkt_hdr1 *) skb->data;
	if (np->dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     ((u32)rh->hashval2_0 << 24 |
			      (u32)rh->hashval2_1 << 16 |
			      (u32)rh->hashval1_1 << 8 |
			      (u32)rh->hashval1_2 << 0),
			     PKT_HASH_TYPE_L3);
	skb_pull(skb, sizeof(*rh));

	rp->rx_packets++;
	rp->rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, np->dev);
	skb_record_rx_queue(skb, rp->rx_channel);
	napi_gro_receive(napi, skb);

	return num_rcr;
}

static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int blocks_per_page = rp->rbr_blocks_per_page;
	int err, index = rp->rbr_index;

	err = 0;
	while (index < (rp->rbr_table_size - blocks_per_page)) {
		err = niu_rbr_add_page(np, rp, mask, index);
		if (unlikely(err))
			break;

		index += blocks_per_page;
	}

	rp->rbr_index = index;

	return err;
}

static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
{
	int i;

	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
		struct page *page;

		page = rp->rxhash[i];
		while (page) {
			struct page *next = (struct page *) page->mapping;
			u64 base = page->index;

			np->ops->unmap_page(np->device, base, PAGE_SIZE,
					    DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;

			__free_page(page);

			page = next;
		}
	}

	for (i = 0; i < rp->rbr_table_size; i++)
		rp->rbr[i] = cpu_to_le32(0);
	rp->rbr_index = 0;
}
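
/* TX completion: the software packet header prepended to every frame
 * records the original length and pad, which release_tx_packet() uses
 * to credit rp->tx_bytes before unmapping the head buffer and all
 * fragment descriptors.
 */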
static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
{
	struct tx_buff_info *tb = &rp->tx_buffs[idx];
	struct sk_buff *skb = tb->skb;
	struct tx_pkt_hdr *tp;
	u64 tx_flags;
	int i, len;

	tp = (struct tx_pkt_hdr *) skb->data;
	tx_flags = le64_to_cpup(&tp->flags);

	rp->tx_packets++;
	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
			 ((tx_flags & TXHDR_PAD) / 2));

	len = skb_headlen(skb);
	np->ops->unmap_single(np->device, tb->mapping,
			      len, DMA_TO_DEVICE);

	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
		rp->mark_pending--;

	tb->skb = NULL;
	do {
		idx = NEXT_TX(rp, idx);
		len -= MAX_TX_DESC_LEN;
	} while (len > 0);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		tb = &rp->tx_buffs[idx];
		BUG_ON(tb->skb != NULL);
		np->ops->unmap_page(np->device, tb->mapping,
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
				    DMA_TO_DEVICE);
		idx = NEXT_TX(rp, idx);
	}

	dev_kfree_skb(skb);

	return idx;
}

#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)

static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
	struct netdev_queue *txq;
	u16 pkt_cnt, tmp;
	int cons, index;
	u64 cs;

	index = (rp - np->tx_rings);
	txq = netdev_get_tx_queue(np->dev, index);

	cs = rp->tx_cs;
	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
		goto out;

	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);

	rp->last_pkt_cnt = tmp;

	cons = rp->cons;

	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);

	while (pkt_cnt--)
		cons = release_tx_packet(np, rp, cons);

	rp->cons = cons;
	smp_mb();

out:
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static inline void niu_sync_rx_discard_stats(struct niu *np,
					     struct rx_ring_info *rp,
					     int limit)
{
	/* This elaborate scheme is needed for reading the RX discard
	 * counters, as they are only 16-bit and can overflow quickly,
	 * and because the overflow indication bit is not usable as
	 * the counter value does not wrap, but remains at max value
	 * 0xFFFF.
	 *
	 * In theory and in practice counters can be lost in between
	 * reading nr64() and clearing the counter nw64().  For this
	 * reason, the number of counter clearings nw64() is
	 * limited/reduced though the limit parameter.
	 */
	int rx_channel = rp->rx_channel;
	u32 misc, wred;

	/* RXMISC (Receive Miscellaneous Discard Count), covers the
	 * following discard events: IPP (Input Port Process),
	 * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
	 * Block Ring) prefetch buffer is empty.
	 */
	misc = nr64(RXMISC(rx_channel));
	if (unlikely((misc & RXMISC_COUNT) > limit)) {
		nw64(RXMISC(rx_channel), 0);
		rp->rx_errors += misc & RXMISC_COUNT;

		if (unlikely(misc & RXMISC_OFLOW))
			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
				rx_channel);

		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
			     "rx-%d: MISC drop=%u over=%u\n",
			     rx_channel, misc, misc-limit);
	}

	/* WRED (Weighted Random Early Discard) by hardware */
	wred = nr64(RED_DIS_CNT(rx_channel));
	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
		nw64(RED_DIS_CNT(rx_channel), 0);
		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;

		if (unlikely(wred & RED_DIS_CNT_OFLOW))
			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);

		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
			     "rx-%d: WRED drop=%u over=%u\n",
			     rx_channel, wred, wred-limit);
	}
}

static int niu_rx_work(struct napi_struct *napi, struct niu *np,
		       struct rx_ring_info *rp, int budget)
{
	int qlen, rcr_done = 0, work_done = 0;
	struct rxdma_mailbox *mbox = rp->mbox;
	u64 stat;

#if 1
	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
#else
	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
#endif
	mbox->rx_dma_ctl_stat = 0;
	mbox->rcrstat_a = 0;

	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);

	rcr_done = work_done = 0;
	qlen = min(qlen, budget);
	while (work_done < qlen) {
		rcr_done += niu_process_rx_pkt(napi, np, rp);
		work_done++;
	}

	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
		unsigned int i;

		for (i = 0; i < rp->rbr_refill_pending; i++)
			niu_rbr_refill(np, rp, GFP_ATOMIC);
		rp->rbr_refill_pending = 0;
	}

	stat = (RX_DMA_CTL_STAT_MEX |
		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));

	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);

	/* Only sync discards stats when qlen indicate potential for drops */
	if (qlen > 10)
		niu_sync_rx_discard_stats(np, rp, 0x7FFF);

	return work_done;
}
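
/* Each logical device group (LDG) status word packs the TX DMA vector
 * into the high 32 bits and the RX DMA vector into the low 32 bits;
 * niu_poll_core() fans the two halves out to the per-ring workers.
 */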
static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
{
	u64 v0 = lp->v0;
	u32 tx_vec = (v0 >> 32);
	u32 rx_vec = (v0 & 0xffffffff);
	int i, work_done = 0;

	netif_printk(np, intr, KERN_DEBUG, np->dev,
		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];
		if (tx_vec & (1 << rp->tx_channel))
			niu_tx_work(np, rp);
		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
	}

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		if (rx_vec & (1 << rp->rx_channel)) {
			int this_work_done;

			this_work_done = niu_rx_work(&lp->napi, np, rp,
						     budget);

			budget -= this_work_done;
			work_done += this_work_done;
		}
		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
	}

	return work_done;
}

static int niu_poll(struct napi_struct *napi, int budget)
{
	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
	struct niu *np = lp->np;
	int work_done;

	work_done = niu_poll_core(np, lp, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		niu_ldg_rearm(np, lp, 1);
	}
	return work_done;
}

static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
				  u64 stat)
{
	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);

	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
		pr_cont("RBR_TMOUT ");
	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
		pr_cont("RSP_CNT ");
	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
		pr_cont("BYTE_EN_BUS ");
	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
		pr_cont("RSP_DAT ");
	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
		pr_cont("RCR_ACK ");
	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
		pr_cont("RCR_SHA_PAR ");
	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
		pr_cont("RBR_PRE_PAR ");
	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
		pr_cont("CONFIG ");
	if (stat & RX_DMA_CTL_STAT_RCRINCON)
		pr_cont("RCRINCON ");
	if (stat & RX_DMA_CTL_STAT_RCRFULL)
		pr_cont("RCRFULL ");
	if (stat & RX_DMA_CTL_STAT_RBRFULL)
		pr_cont("RBRFULL ");
	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
		pr_cont("RBRLOGPAGE ");
	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
		pr_cont("CFIGLOGPAGE ");
	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
		pr_cont("DC_FIDO ");

	pr_cont(")\n");
}

static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
{
	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
	int err = 0;

	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
		    RX_DMA_CTL_STAT_PORT_FATAL))
		err = -EINVAL;

	if (err) {
		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
			   rp->rx_channel,
			   (unsigned long long) stat);

		niu_log_rxchan_errors(np, rp, stat);
	}

	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);

	return err;
}

static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
				  u64 cs)
{
	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);

	if (cs & TX_CS_MBOX_ERR)
		pr_cont("MBOX ");
	if (cs & TX_CS_PKT_SIZE_ERR)
		pr_cont("PKT_SIZE ");
	if (cs & TX_CS_TX_RING_OFLOW)
		pr_cont("TX_RING_OFLOW ");
	if (cs & TX_CS_PREF_BUF_PAR_ERR)
		pr_cont("PREF_BUF_PAR ");
	if (cs & TX_CS_NACK_PREF)
		pr_cont("NACK_PREF ");
	if (cs & TX_CS_NACK_PKT_RD)
		pr_cont("NACK_PKT_RD ");
	if (cs & TX_CS_CONF_PART_ERR)
		pr_cont("CONF_PART ");
	if (cs & TX_CS_PKT_PRT_ERR)
		pr_cont("PKT_PTR ");

	pr_cont(")\n");
}

static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
{
	u64 cs, logh, logl;

	cs = nr64(TX_CS(rp->tx_channel));
	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));

	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
		   rp->tx_channel,
		   (unsigned long long)cs,
		   (unsigned long long)logh,
		   (unsigned long long)logl);

	niu_log_txchan_errors(np, rp, cs);

	return -ENODEV;
}

static int niu_mif_interrupt(struct niu *np)
{
	u64 mif_status = nr64(MIF_STATUS);
	int phy_mdint = 0;

	if (np->flags & NIU_FLAGS_XMAC) {
		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);

		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
			phy_mdint = 1;
	}

	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
		   (unsigned long long)mif_status, phy_mdint);

	return -ENODEV;
}

static void niu_xmac_interrupt(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
	u64 val;

	val = nr64_mac(XTXMAC_STATUS);
	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
		mp->tx_fifo_errors++;
	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
		mp->tx_overflow_errors++;
	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
		mp->tx_max_pkt_size_errors++;
	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
		mp->tx_underflow_errors++;

	val = nr64_mac(XRXMAC_STATUS);
	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
		mp->rx_local_faults++;
	if (val & XRXMAC_STATUS_RFLT_DET)
		mp->rx_remote_faults++;
	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
		mp->rx_octets += RXMAC_BT_CNT_COUNT;
	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
	if (val & XRXMAC_STATUS_RXUFLOW)
		mp->rx_underflows++;
	if (val & XRXMAC_STATUS_RXOFLOW)
		mp->rx_overflows++;

	val = nr64_mac(XMAC_FC_STAT);
	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
		mp->pause_off_state++;
	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
		mp->pause_on_state++;
	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
		mp->pause_received++;
}

static void niu_bmac_interrupt(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
	u64 val;

	val = nr64_mac(BTXMAC_STATUS);
	if (val & BTXMAC_STATUS_UNDERRUN)
		mp->tx_underflow_errors++;
	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
		mp->tx_max_pkt_size_errors++;
	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;

	val = nr64_mac(BRXMAC_STATUS);
	if (val & BRXMAC_STATUS_OVERFLOW)
		mp->rx_overflows++;
	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;

	val = nr64_mac(BMAC_CTRL_STATUS);
	if (val & BMAC_CTRL_STATUS_NOPAUSE)
		mp->pause_off_state++;
	if (val & BMAC_CTRL_STATUS_PAUSE)
		mp->pause_on_state++;
	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
		mp->pause_received++;
}

static int niu_mac_interrupt(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_xmac_interrupt(np);
	else
		niu_bmac_interrupt(np);

	return 0;
}
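
/* Core device errors arrive as a summary bitmask in SYS_ERR_STAT; the
 * decoder below simply names the reporting sub-block (PEU, TXC, RDMC,
 * ZCP, FFLP, IPP, MAC, SMX, ...) for the log.
 */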
static void niu_log_device_error(struct niu *np, u64 stat)
{
	netdev_err(np->dev, "Core device errors ( ");

	if (stat & SYS_ERR_MASK_META2)
		pr_cont("META2 ");
	if (stat & SYS_ERR_MASK_META1)
		pr_cont("META1 ");
	if (stat & SYS_ERR_MASK_PEU)
		pr_cont("PEU ");
	if (stat & SYS_ERR_MASK_TXC)
		pr_cont("TXC ");
	if (stat & SYS_ERR_MASK_RDMC)
		pr_cont("RDMC ");
	if (stat & SYS_ERR_MASK_TDMC)
		pr_cont("TDMC ");
	if (stat & SYS_ERR_MASK_ZCP)
		pr_cont("ZCP ");
	if (stat & SYS_ERR_MASK_FFLP)
		pr_cont("FFLP ");
	if (stat & SYS_ERR_MASK_IPP)
		pr_cont("IPP ");
	if (stat & SYS_ERR_MASK_MAC)
		pr_cont("MAC ");
	if (stat & SYS_ERR_MASK_SMX)
		pr_cont("SMX ");

	pr_cont(")\n");
}

static int niu_device_error(struct niu *np)
{
	u64 stat = nr64(SYS_ERR_STAT);

	netdev_err(np->dev, "Core device error, stat[%llx]\n",
		   (unsigned long long)stat);

	niu_log_device_error(np, stat);

	return -ENODEV;
}

static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
			      u64 v0, u64 v1, u64 v2)
{
	int i, err = 0;

	lp->v0 = v0;
	lp->v1 = v1;
	lp->v2 = v2;

	if (v1 & 0x00000000ffffffffULL) {
		u32 rx_vec = (v1 & 0xffffffff);

		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			if (rx_vec & (1 << rp->rx_channel)) {
				int r = niu_rx_error(np, rp);
				if (r) {
					err = r;
				} else {
					if (!v0)
						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
						     RX_DMA_CTL_STAT_MEX);
				}
			}
		}
	}
	if (v1 & 0x7fffffff00000000ULL) {
		u32 tx_vec = (v1 >> 32) & 0x7fffffff;

		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			if (tx_vec & (1 << rp->tx_channel)) {
				int r = niu_tx_error(np, rp);
				if (r)
					err = r;
			}
		}
	}
	if ((v0 | v1) & 0x8000000000000000ULL) {
		int r = niu_mif_interrupt(np);
		if (r)
			err = r;
	}
	if (v2) {
		if (v2 & 0x01ef) {
			int r = niu_mac_interrupt(np);
			if (r)
				err = r;
		}
		if (v2 & 0x0210) {
			int r = niu_device_error(np);
			if (r)
				err = r;
		}
	}

	if (err)
		niu_enable_interrupts(np, 0);

	return err;
}

static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
			    int ldn)
{
	struct rxdma_mailbox *mbox = rp->mbox;
	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);

	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
		      RX_DMA_CTL_STAT_RCRTO);
	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);

	netif_printk(np, intr, KERN_DEBUG, np->dev,
		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
}

static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
			    int ldn)
{
	rp->tx_cs = nr64(TX_CS(rp->tx_channel));

	netif_printk(np, intr, KERN_DEBUG, np->dev,
		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
}

static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
{
	struct niu_parent *parent = np->parent;
	u32 rx_vec, tx_vec;
	int i;

	tx_vec = (v0 >> 32);
	rx_vec = (v0 & 0xffffffff);

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];
		int ldn = LDN_RXDMA(rp->rx_channel);

		if (parent->ldg_map[ldn] != ldg)
			continue;

		nw64(LD_IM0(ldn), LD_IM0_MASK);
		if (rx_vec & (1 << rp->rx_channel))
			niu_rxchan_intr(np, rp, ldn);
	}

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];
		int ldn = LDN_TXDMA(rp->tx_channel);

		if (parent->ldg_map[ldn] != ldg)
			continue;

		nw64(LD_IM0(ldn), LD_IM0_MASK);
		if (tx_vec & (1 << rp->tx_channel))
			niu_txchan_intr(np, rp, ldn);
	}
}

static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
			      u64 v0, u64 v1, u64 v2)
{
	if (likely(napi_schedule_prep(&lp->napi))) {
		lp->v0 = v0;
		lp->v1 = v1;
		lp->v2 = v2;
		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
		__napi_schedule(&lp->napi);
	}
}
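
/* Hard interrupt entry point.  Rare and fatal conditions (MIF, MAC
 * and device errors reported in v1/v2) are handled synchronously in
 * the slow path above; ordinary RX/TX DMA work is masked at the LDG
 * and deferred to NAPI via niu_schedule_napi().
 */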
static irqreturn_t niu_interrupt(int irq, void *dev_id)
{
	struct niu_ldg *lp = dev_id;
	struct niu *np = lp->np;
	int ldg = lp->ldg_num;
	unsigned long flags;
	u64 v0, v1, v2;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
		       __func__, lp, ldg);

	spin_lock_irqsave(&np->lock, flags);

	v0 = nr64(LDSV0(ldg));
	v1 = nr64(LDSV1(ldg));
	v2 = nr64(LDSV2(ldg));

	if (netif_msg_intr(np))
		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
			(unsigned long long) v0,
			(unsigned long long) v1,
			(unsigned long long) v2);

	if (unlikely(!v0 && !v1 && !v2)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return IRQ_NONE;
	}

	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
		if (err)
			goto out;
	}
	if (likely(v0 & ~((u64)1 << LDN_MIF)))
		niu_schedule_napi(np, lp, v0, v1, v2);
	else
		niu_ldg_rearm(np, lp, 1);
out:
	spin_unlock_irqrestore(&np->lock, flags);

	return IRQ_HANDLED;
}

static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
{
	if (rp->mbox) {
		np->ops->free_coherent(np->device,
				       sizeof(struct rxdma_mailbox),
				       rp->mbox, rp->mbox_dma);
		rp->mbox = NULL;
	}
	if (rp->rcr) {
		np->ops->free_coherent(np->device,
				       MAX_RCR_RING_SIZE * sizeof(__le64),
				       rp->rcr, rp->rcr_dma);
		rp->rcr = NULL;
		rp->rcr_table_size = 0;
		rp->rcr_index = 0;
	}
	if (rp->rbr) {
		niu_rbr_free(np, rp);

		np->ops->free_coherent(np->device,
				       MAX_RBR_RING_SIZE * sizeof(__le32),
				       rp->rbr, rp->rbr_dma);
		rp->rbr = NULL;
		rp->rbr_table_size = 0;
		rp->rbr_index = 0;
	}
	kfree(rp->rxhash);
	rp->rxhash = NULL;
}

static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
{
	if (rp->mbox) {
		np->ops->free_coherent(np->device,
				       sizeof(struct txdma_mailbox),
				       rp->mbox, rp->mbox_dma);
		rp->mbox = NULL;
	}
	if (rp->descr) {
		int i;

		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
			if (rp->tx_buffs[i].skb)
				(void) release_tx_packet(np, rp, i);
		}

		np->ops->free_coherent(np->device,
				       MAX_TX_RING_SIZE * sizeof(__le64),
				       rp->descr, rp->descr_dma);
		rp->descr = NULL;
		rp->pending = 0;
		rp->prod = 0;
		rp->cons = 0;
		rp->wrap_bit = 0;
	}
}

static void niu_free_channels(struct niu *np)
{
	int i;

	if (np->rx_rings) {
		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			niu_free_rx_ring_info(np, rp);
		}
		kfree(np->rx_rings);
		np->rx_rings = NULL;
		np->num_rx_rings = 0;
	}

	if (np->tx_rings) {
		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			niu_free_tx_ring_info(np, rp);
		}
		kfree(np->tx_rings);
		np->tx_rings = NULL;
		np->num_tx_rings = 0;
	}
}
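
/* The RXDMA/TXDMA mailboxes and the RCR, RBR and descriptor tables
 * must be 64-byte aligned; the allocators below check every coherent
 * allocation and fail loudly rather than program a misaligned address
 * into the chip.
 */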
static int niu_alloc_rx_ring_info(struct niu *np,
				  struct rx_ring_info *rp)
{
	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);

	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
			     GFP_KERNEL);
	if (!rp->rxhash)
		return -ENOMEM;

	rp->mbox = np->ops->alloc_coherent(np->device,
					   sizeof(struct rxdma_mailbox),
					   &rp->mbox_dma, GFP_KERNEL);
	if (!rp->mbox)
		return -ENOMEM;
	if ((unsigned long)rp->mbox & (64UL - 1)) {
		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
			   rp->mbox);
		return -EINVAL;
	}

	rp->rcr = np->ops->alloc_coherent(np->device,
					  MAX_RCR_RING_SIZE * sizeof(__le64),
					  &rp->rcr_dma, GFP_KERNEL);
	if (!rp->rcr)
		return -ENOMEM;
	if ((unsigned long)rp->rcr & (64UL - 1)) {
		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
			   rp->rcr);
		return -EINVAL;
	}
	rp->rcr_table_size = MAX_RCR_RING_SIZE;
	rp->rcr_index = 0;

	rp->rbr = np->ops->alloc_coherent(np->device,
					  MAX_RBR_RING_SIZE * sizeof(__le32),
					  &rp->rbr_dma, GFP_KERNEL);
	if (!rp->rbr)
		return -ENOMEM;
	if ((unsigned long)rp->rbr & (64UL - 1)) {
		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
			   rp->rbr);
		return -EINVAL;
	}
	rp->rbr_table_size = MAX_RBR_RING_SIZE;
	rp->rbr_index = 0;
	rp->rbr_pending = 0;

	return 0;
}

static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
{
	int mtu = np->dev->mtu;

	/* These values are recommended by the HW designers for fair
	 * utilization of DRR amongst the rings.
	 */
	rp->max_burst = mtu + 32;
	if (rp->max_burst > 4096)
		rp->max_burst = 4096;
}

static int niu_alloc_tx_ring_info(struct niu *np,
				  struct tx_ring_info *rp)
{
	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);

	rp->mbox = np->ops->alloc_coherent(np->device,
					   sizeof(struct txdma_mailbox),
					   &rp->mbox_dma, GFP_KERNEL);
	if (!rp->mbox)
		return -ENOMEM;
	if ((unsigned long)rp->mbox & (64UL - 1)) {
		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
			   rp->mbox);
		return -EINVAL;
	}

	rp->descr = np->ops->alloc_coherent(np->device,
					    MAX_TX_RING_SIZE * sizeof(__le64),
					    &rp->descr_dma, GFP_KERNEL);
	if (!rp->descr)
		return -ENOMEM;
	if ((unsigned long)rp->descr & (64UL - 1)) {
		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
			   rp->descr);
		return -EINVAL;
	}

	rp->pending = MAX_TX_RING_SIZE;
	rp->prod = 0;
	rp->cons = 0;
	rp->wrap_bit = 0;

	/* XXX make these configurable... XXX */
	rp->mark_freq = rp->pending / 4;

	niu_set_max_burst(np, rp);

	return 0;
}

static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
{
	u16 bss;

	bss = min(PAGE_SHIFT, 15);

	rp->rbr_block_size = 1 << bss;
	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);

	rp->rbr_sizes[0] = 256;
	rp->rbr_sizes[1] = 1024;
	if (np->dev->mtu > ETH_DATA_LEN) {
		switch (PAGE_SIZE) {
		case 4 * 1024:
			rp->rbr_sizes[2] = 4096;
			break;

		default:
			rp->rbr_sizes[2] = 8192;
			break;
		}
	} else {
		rp->rbr_sizes[2] = 2048;
	}
	rp->rbr_sizes[3] = rp->rbr_block_size;
}

static int niu_alloc_channels(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int first_rx_channel, first_tx_channel;
	int num_rx_rings, num_tx_rings;
	struct rx_ring_info *rx_rings;
	struct tx_ring_info *tx_rings;
	int i, port, err;

	port = np->port;
	first_rx_channel = first_tx_channel = 0;
	for (i = 0; i < port; i++) {
		first_rx_channel += parent->rxchan_per_port[i];
		first_tx_channel += parent->txchan_per_port[i];
	}

	num_rx_rings = parent->rxchan_per_port[port];
	num_tx_rings = parent->txchan_per_port[port];

	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
			   GFP_KERNEL);
	err = -ENOMEM;
	if (!rx_rings)
		goto out_err;

	np->num_rx_rings = num_rx_rings;
	smp_wmb();
	np->rx_rings = rx_rings;

	netif_set_real_num_rx_queues(np->dev, num_rx_rings);

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		rp->np = np;
		rp->rx_channel = first_rx_channel + i;

		err = niu_alloc_rx_ring_info(np, rp);
		if (err)
			goto out_err;

		niu_size_rbr(np, rp);

		/* XXX better defaults, configurable, etc... XXX */
		rp->nonsyn_window = 64;
		rp->nonsyn_threshold = rp->rcr_table_size - 64;
		rp->syn_window = 64;
		rp->syn_threshold = rp->rcr_table_size - 64;
		rp->rcr_pkt_threshold = 16;
		rp->rcr_timeout = 8;
		rp->rbr_kick_thresh = RBR_REFILL_MIN;
		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;

		err = niu_rbr_fill(np, rp, GFP_KERNEL);
		if (err)
			goto out_err;
	}

	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
			   GFP_KERNEL);
	err = -ENOMEM;
	if (!tx_rings)
		goto out_err;

	np->num_tx_rings = num_tx_rings;
	smp_wmb();
	np->tx_rings = tx_rings;

	netif_set_real_num_tx_queues(np->dev, num_tx_rings);

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		rp->np = np;
		rp->tx_channel = first_tx_channel + i;

		err = niu_alloc_tx_ring_info(np, rp);
		if (err)
			goto out_err;
	}

	return 0;

out_err:
	niu_free_channels(np);
	return err;
}
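
/* TX channel state changes are requested through TX_CS and confirmed
 * by polling: SNG_STATE acknowledges a stop-and-go request, while the
 * RST bit self-clears once a channel reset has completed.
 */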
static int niu_tx_cs_sng_poll(struct niu *np, int channel)
{
	int limit = 1000;

	while (--limit > 0) {
		u64 val = nr64(TX_CS(channel));
		if (val & TX_CS_SNG_STATE)
			return 0;
	}
	return -ENODEV;
}

static int niu_tx_channel_stop(struct niu *np, int channel)
{
	u64 val = nr64(TX_CS(channel));

	val |= TX_CS_STOP_N_GO;
	nw64(TX_CS(channel), val);

	return niu_tx_cs_sng_poll(np, channel);
}

static int niu_tx_cs_reset_poll(struct niu *np, int channel)
{
	int limit = 1000;

	while (--limit > 0) {
		u64 val = nr64(TX_CS(channel));
		if (!(val & TX_CS_RST))
			return 0;
	}
	return -ENODEV;
}

static int niu_tx_channel_reset(struct niu *np, int channel)
{
	u64 val = nr64(TX_CS(channel));
	int err;

	val |= TX_CS_RST;
	nw64(TX_CS(channel), val);

	err = niu_tx_cs_reset_poll(np, channel);
	if (!err)
		nw64(TX_RING_KICK(channel), 0);

	return err;
}

static int niu_tx_channel_lpage_init(struct niu *np, int channel)
{
	u64 val;

	nw64(TX_LOG_MASK1(channel), 0);
	nw64(TX_LOG_VAL1(channel), 0);
	nw64(TX_LOG_MASK2(channel), 0);
	nw64(TX_LOG_VAL2(channel), 0);
	nw64(TX_LOG_PAGE_RELO1(channel), 0);
	nw64(TX_LOG_PAGE_RELO2(channel), 0);
	nw64(TX_LOG_PAGE_HDL(channel), 0);

	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
	nw64(TX_LOG_PAGE_VLD(channel), val);

	/* XXX TXDMA 32bit mode? XXX */

	return 0;
}

static void niu_txc_enable_port(struct niu *np, int on)
{
	unsigned long flags;
	u64 val, mask;

	niu_lock_parent(np, flags);
	val = nr64(TXC_CONTROL);
	mask = (u64)1 << np->port;
	if (on) {
		val |= TXC_CONTROL_ENABLE | mask;
	} else {
		val &= ~mask;
		if ((val & ~TXC_CONTROL_ENABLE) == 0)
			val &= ~TXC_CONTROL_ENABLE;
	}
	nw64(TXC_CONTROL, val);
	niu_unlock_parent(np, flags);
}

static void niu_txc_set_imask(struct niu *np, u64 imask)
{
	unsigned long flags;
	u64 val;

	niu_lock_parent(np, flags);
	val = nr64(TXC_INT_MASK);
	val &= ~TXC_INT_MASK_VAL(np->port);
	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
	niu_unlock_parent(np, flags);
}

static void niu_txc_port_dma_enable(struct niu *np, int on)
{
	u64 val = 0;

	if (on) {
		int i;

		for (i = 0; i < np->num_tx_rings; i++)
			val |= (1 << np->tx_rings[i].tx_channel);
	}
	nw64(TXC_PORT_DMA(np->port), val);
}

static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	int err, channel = rp->tx_channel;
	u64 val, ring_len;

	err = niu_tx_channel_stop(np, channel);
	if (err)
		return err;

	err = niu_tx_channel_reset(np, channel);
	if (err)
		return err;

	err = niu_tx_channel_lpage_init(np, channel);
	if (err)
		return err;

	nw64(TXC_DMA_MAX(channel), rp->max_burst);
	nw64(TX_ENT_MSK(channel), 0);

	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
			      TX_RNG_CFIG_STADDR)) {
		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
			   channel, (unsigned long long)rp->descr_dma);
		return -EINVAL;
	}

	/* The length field in TX_RNG_CFIG is measured in 64-byte
	 * blocks.  rp->pending is the number of TX descriptors in
	 * our ring, 8 bytes each, thus we divide by 8 bytes more
	 * to get the proper value the chip wants.
	 */
	ring_len = (rp->pending / 8);

	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
	       rp->descr_dma);
	nw64(TX_RNG_CFIG(channel), val);

	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
			   channel, (unsigned long long)rp->mbox_dma);
		return -EINVAL;
	}
	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);

	nw64(TX_CS(channel), 0);

	rp->last_pkt_cnt = 0;

	return 0;
}

static void niu_init_rdc_groups(struct niu *np)
{
	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
	int i, first_table_num = tp->first_table_num;

	for (i = 0; i < tp->num_tables; i++) {
		struct rdc_table *tbl = &tp->tables[i];
		int this_table = first_table_num + i;
		int slot;

		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
			nw64(RDC_TBL(this_table, slot),
			     tbl->rxdma_channel[slot]);
	}

	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
}

static void niu_init_drr_weight(struct niu *np)
{
	int type = phy_decode(np->parent->port_phy, np->port);
	u64 val;

	switch (type) {
	case PORT_TYPE_10G:
		val = PT_DRR_WEIGHT_DEFAULT_10G;
		break;

	case PORT_TYPE_1G:
	default:
		val = PT_DRR_WEIGHT_DEFAULT_1G;
		break;
	}
	nw64(PT_DRR_WT(np->port), val);
}

static int niu_init_hostinfo(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int i, err, num_alt = niu_num_alt_addr(np);
	int first_rdc_table = tp->first_table_num;

	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	if (err)
		return err;

	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	if (err)
		return err;

	for (i = 0; i < num_alt; i++) {
		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
		if (err)
			return err;
	}

	return 0;
}

static int niu_rx_channel_reset(struct niu *np, int channel)
{
	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
				      RXDMA_CFIG1_RST, 1000, 10,
				      "RXDMA_CFIG1");
}

static int niu_rx_channel_lpage_init(struct niu *np, int channel)
{
	u64 val;

	nw64(RX_LOG_MASK1(channel), 0);
	nw64(RX_LOG_VAL1(channel), 0);
	nw64(RX_LOG_MASK2(channel), 0);
	nw64(RX_LOG_VAL2(channel), 0);
	nw64(RX_LOG_PAGE_RELO1(channel), 0);
	nw64(RX_LOG_PAGE_RELO2(channel), 0);
	nw64(RX_LOG_PAGE_HDL(channel), 0);

	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
	nw64(RX_LOG_PAGE_VLD(channel), val);

	return 0;
}

static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
{
	u64 val;

	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
	nw64(RDC_RED_PARA(rp->rx_channel), val);
}
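
/* RBR_CFIG_B encodes the RBR block size and the three configurable
 * buffer sizes as 2-bit codes, so only the discrete sizes named in
 * the switch statements below are representable; anything else is
 * rejected with -EINVAL.
 */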
static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
{
	u64 val = 0;

	*ret = 0;
	switch (rp->rbr_block_size) {
	case 4 * 1024:
		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 16 * 1024:
		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 32 * 1024:
		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD2;
	switch (rp->rbr_sizes[2]) {
	case 2 * 1024:
		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 4 * 1024:
		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 16 * 1024:
		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD1;
	switch (rp->rbr_sizes[1]) {
	case 1 * 1024:
		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 2 * 1024:
		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 4 * 1024:
		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD0;
	switch (rp->rbr_sizes[0]) {
	case 256:
		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 512:
		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 1 * 1024:
		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 2 * 1024:
		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	default:
		return -EINVAL;
	}

	*ret = val;
	return 0;
}

static int niu_enable_rx_channel(struct niu *np, int channel, int on)
{
	u64 val = nr64(RXDMA_CFIG1(channel));
	int limit;

	if (on)
		val |= RXDMA_CFIG1_EN;
	else
		val &= ~RXDMA_CFIG1_EN;
	nw64(RXDMA_CFIG1(channel), val);

	limit = 1000;
	while (--limit > 0) {
		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
			break;
		udelay(10);
	}
	if (limit <= 0)
		return -ENODEV;
	return 0;
}

static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int err, channel = rp->rx_channel;
	u64 val;

	err = niu_rx_channel_reset(np, channel);
	if (err)
		return err;

	err = niu_rx_channel_lpage_init(np, channel);
	if (err)
		return err;

	niu_rx_channel_wred_init(np, rp);

	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
	nw64(RX_DMA_CTL_STAT(channel),
	     (RX_DMA_CTL_STAT_MEX |
	      RX_DMA_CTL_STAT_RCRTHRES |
	      RX_DMA_CTL_STAT_RCRTO |
	      RX_DMA_CTL_STAT_RBR_EMPTY));
	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
	nw64(RXDMA_CFIG2(channel),
	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
	      RXDMA_CFIG2_FULL_HDR));
	nw64(RBR_CFIG_A(channel),
	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
	err = niu_compute_rbr_cfig_b(rp, &val);
	if (err)
		return err;
	nw64(RBR_CFIG_B(channel), val);
	nw64(RCRCFIG_A(channel),
	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
	nw64(RCRCFIG_B(channel),
	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
	     RCRCFIG_B_ENTOUT |
	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));

	err = niu_enable_rx_channel(np, channel, 1);
	if (err)
		return err;

	nw64(RBR_KICK(channel), rp->rbr_index);

	val = nr64(RX_DMA_CTL_STAT(channel));
	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
	nw64(RX_DMA_CTL_STAT(channel), val);

	return 0;
}

static int niu_init_rx_channels(struct niu *np)
{
	unsigned long flags;
	u64 seed = jiffies_64;
	int err, i;

	niu_lock_parent(np, flags);
	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
	niu_unlock_parent(np, flags);

	/* XXX RXDMA 32bit mode? XXX */

	niu_init_rdc_groups(np);
	niu_init_drr_weight(np);

	err = niu_init_hostinfo(np);
	if (err)
		return err;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		err = niu_init_one_rx_channel(np, rp);
		if (err)
			return err;
	}

	return 0;
}

static int niu_set_ip_frag_rule(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_classifier *cp = &np->clas;
	struct niu_tcam_entry *tp;
	int index, err;

	index = cp->tcam_top;
	tp = &parent->tcam[index];

	/* Note that the noport bit is the same in both ipv4 and
	 * ipv6 format TCAM entries.
	 */
	memset(tp, 0, sizeof(*tp));
	tp->key[1] = TCAM_V4KEY1_NOPORT;
	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
	err = tcam_write(np, index, tp->key, tp->key_mask);
	if (err)
		return err;
	err = tcam_assoc_write(np, index, tp->assoc_data);
	if (err)
		return err;
	tp->valid = 1;
	cp->tcam_valid_entries++;

	return 0;
}

static int niu_init_classifier_hw(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_classifier *cp = &np->clas;
	int i, err;

	nw64(H1POLY, cp->h1_init);
	nw64(H2POLY, cp->h2_init);

	err = niu_init_hostinfo(np);
	if (err)
		return err;

	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];

		vlan_tbl_write(np, i, np->port,
			       vp->vlan_pref, vp->rdc_num);
	}

	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];

		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
						ap->rdc_num, ap->mac_pref);
		if (err)
			return err;
	}

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
		if (err)
			return err;
		err = niu_set_flow_key(np, i, parent->flow_key[index]);
		if (err)
			return err;
	}

	err = niu_set_ip_frag_rule(np);
	if (err)
		return err;

	tcam_enable(np, 1);

	return 0;
}
static int niu_zcp_write(struct niu *np, int index, u64 *data)
{
	nw64(ZCP_RAM_DATA0, data[0]);
	nw64(ZCP_RAM_DATA1, data[1]);
	nw64(ZCP_RAM_DATA2, data[2]);
	nw64(ZCP_RAM_DATA3, data[3]);
	nw64(ZCP_RAM_DATA4, data[4]);
	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_WRITE |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				   1000, 100);
}

static int niu_zcp_read(struct niu *np, int index, u64 *data)
{
	int err;

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				  1000, 100);
	if (err) {
		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
			   (unsigned long long)nr64(ZCP_RAM_ACC));
		return err;
	}

	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_READ |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				  1000, 100);
	if (err) {
		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
			   (unsigned long long)nr64(ZCP_RAM_ACC));
		return err;
	}

	data[0] = nr64(ZCP_RAM_DATA0);
	data[1] = nr64(ZCP_RAM_DATA1);
	data[2] = nr64(ZCP_RAM_DATA2);
	data[3] = nr64(ZCP_RAM_DATA3);
	data[4] = nr64(ZCP_RAM_DATA4);

	return 0;
}

static void niu_zcp_cfifo_reset(struct niu *np)
{
	u64 val = nr64(RESET_CFIFO);

	val |= RESET_CFIFO_RST(np->port);
	nw64(RESET_CFIFO, val);
	udelay(10);

	val &= ~RESET_CFIFO_RST(np->port);
	nw64(RESET_CFIFO, val);
}

static int niu_init_zcp(struct niu *np)
{
	u64 data[5], rbuf[5];
	int i, max, err;

	if (np->parent->plat_type != PLAT_TYPE_NIU) {
		if (np->port == 0 || np->port == 1)
			max = ATLAS_P0_P1_CFIFO_ENTRIES;
		else
			max = ATLAS_P2_P3_CFIFO_ENTRIES;
	} else
		max = NIU_CFIFO_ENTRIES;

	data[0] = 0;
	data[1] = 0;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;

	for (i = 0; i < max; i++) {
		err = niu_zcp_write(np, i, data);
		if (err)
			return err;
		err = niu_zcp_read(np, i, rbuf);
		if (err)
			return err;
	}

	niu_zcp_cfifo_reset(np);
	nw64(CFIFO_ECC(np->port), 0);
	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
	(void) nr64(ZCP_INT_STAT);
	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);

	return 0;
}

static void niu_ipp_write(struct niu *np, int index, u64 *data)
{
	u64 val = nr64_ipp(IPP_CFIG);

	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
	nw64_ipp(IPP_DFIFO_WR_PTR, index);
	nw64_ipp(IPP_DFIFO_WR0, data[0]);
	nw64_ipp(IPP_DFIFO_WR1, data[1]);
	nw64_ipp(IPP_DFIFO_WR2, data[2]);
	nw64_ipp(IPP_DFIFO_WR3, data[3]);
	nw64_ipp(IPP_DFIFO_WR4, data[4]);
	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
}

static void niu_ipp_read(struct niu *np, int index, u64 *data)
{
	nw64_ipp(IPP_DFIFO_RD_PTR, index);
	data[0] = nr64_ipp(IPP_DFIFO_RD0);
	data[1] = nr64_ipp(IPP_DFIFO_RD1);
	data[2] = nr64_ipp(IPP_DFIFO_RD2);
	data[3] = nr64_ipp(IPP_DFIFO_RD3);
	data[4] = nr64_ipp(IPP_DFIFO_RD4);
}

static int niu_ipp_reset(struct niu *np)
{
	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
					  1000, 100, "IPP_CFIG");
}

static int niu_init_ipp(struct niu *np)
{
	u64 data[5], rbuf[5], val;
	int i, max, err;

	if (np->parent->plat_type != PLAT_TYPE_NIU) {
		if (np->port == 0 || np->port == 1)
			max = ATLAS_P0_P1_DFIFO_ENTRIES;
		else
			max = ATLAS_P2_P3_DFIFO_ENTRIES;
	} else
		max = NIU_DFIFO_ENTRIES;

	data[0] = 0;
	data[1] = 0;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;

	for (i = 0; i < max; i++) {
		niu_ipp_write(np, i, data);
		niu_ipp_read(np, i, rbuf);
	}

	(void) nr64_ipp(IPP_INT_STAT);
	(void) nr64_ipp(IPP_INT_STAT);

	err = niu_ipp_reset(np);
	if (err)
		return err;

	(void) nr64_ipp(IPP_PKT_DIS);
	(void) nr64_ipp(IPP_BAD_CS_CNT);
	(void) nr64_ipp(IPP_ECC);

	(void) nr64_ipp(IPP_INT_STAT);

	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);

	val = nr64_ipp(IPP_CFIG);
	val &= ~IPP_CFIG_IP_MAX_PKT;
	val |= (IPP_CFIG_IPP_ENABLE |
		IPP_CFIG_DFIFO_ECC_EN |
		IPP_CFIG_DROP_BAD_CRC |
		IPP_CFIG_CKSUM_EN |
		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
	nw64_ipp(IPP_CFIG, val);

	return 0;
}

static void niu_handle_led(struct niu *np, int status)
{
	u64 val;
	val = nr64_mac(XMAC_CONFIG);

	if ((np->flags & NIU_FLAGS_10G) != 0 &&
	    (np->flags & NIU_FLAGS_FIBER) != 0) {
		if (status) {
			val |= XMAC_CONFIG_LED_POLARITY;
			val &= ~XMAC_CONFIG_FORCE_LED_ON;
		} else {
			val |= XMAC_CONFIG_FORCE_LED_ON;
			val &= ~XMAC_CONFIG_LED_POLARITY;
		}
	}

	nw64_mac(XMAC_CONFIG, val);
}

static void niu_init_xif_xmac(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
		val = nr64(MIF_CONFIG);
		val |= MIF_CONFIG_ATCA_GE;
		nw64(MIF_CONFIG, val);
	}

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;

	val |= XMAC_CONFIG_TX_OUTPUT_EN;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
		val |= XMAC_CONFIG_LOOPBACK;
	} else {
		val &= ~XMAC_CONFIG_LOOPBACK;
	}

	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_CONFIG_LFS_DISABLE;
	} else {
		val |= XMAC_CONFIG_LFS_DISABLE;
		if (!(np->flags & NIU_FLAGS_FIBER) &&
		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
			val |= XMAC_CONFIG_1G_PCS_BYPASS;
		else
			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
	}

	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;

	if (lp->active_speed == SPEED_100)
		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
	else
		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;

	nw64_mac(XMAC_CONFIG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_MODE_MASK;
	if (np->flags & NIU_FLAGS_10G) {
		val |= XMAC_CONFIG_MODE_XGMII;
	} else {
		if (lp->active_speed == SPEED_1000)
			val |= XMAC_CONFIG_MODE_GMII;
		else
			val |= XMAC_CONFIG_MODE_MII;
	}

	nw64_mac(XMAC_CONFIG, val);
}

static void niu_init_xif_bmac(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;

	if (lp->loopback_mode == LOOPBACK_MAC)
		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
	else
		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;

	if (lp->active_speed == SPEED_1000)
		val |= BMAC_XIF_CONFIG_GMII_MODE;
	else
		val &= ~BMAC_XIF_CONFIG_GMII_MODE;

	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
		 BMAC_XIF_CONFIG_LED_POLARITY);

	if (!(np->flags & NIU_FLAGS_10G) &&
	    !(np->flags & NIU_FLAGS_FIBER) &&
	    lp->active_speed == SPEED_100)
		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
	else
		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;

	nw64_mac(BMAC_XIF_CONFIG, val);
}

static void niu_init_xif(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_xif_xmac(np);
	else
		niu_init_xif_bmac(np);
}
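
/* PCS handling differs per link flavor: 1G fiber uses the MII PCS,
 * 10G links go through the XPCS (with optional PHY loopback), and the
 * SERDES/RGMII combinations select datapath modes of their own, as
 * niu_init_pcs() below lays out.
 */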
5384 static void niu_pcs_mii_reset(struct niu
*np
)
5387 u64 val
= nr64_pcs(PCS_MII_CTL
);
5388 val
|= PCS_MII_CTL_RST
;
5389 nw64_pcs(PCS_MII_CTL
, val
);
5390 while ((--limit
>= 0) && (val
& PCS_MII_CTL_RST
)) {
5392 val
= nr64_pcs(PCS_MII_CTL
);
5396 static void niu_xpcs_reset(struct niu
*np
)
5399 u64 val
= nr64_xpcs(XPCS_CONTROL1
);
5400 val
|= XPCS_CONTROL1_RESET
;
5401 nw64_xpcs(XPCS_CONTROL1
, val
);
5402 while ((--limit
>= 0) && (val
& XPCS_CONTROL1_RESET
)) {
5404 val
= nr64_xpcs(XPCS_CONTROL1
);
static int niu_init_pcs(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	switch (np->flags & (NIU_FLAGS_10G |
			     NIU_FLAGS_FIBER |
			     NIU_FLAGS_XCVR_SERDES)) {
	case NIU_FLAGS_FIBER:
		/* 1G fiber */
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		niu_pcs_mii_reset(np);
		break;

	case NIU_FLAGS_10G:
	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		/* 10G SERDES */
		if (!(np->flags & NIU_FLAGS_XMAC))
			return -EINVAL;

		/* 10G copper or fiber */
		val = nr64_mac(XMAC_CONFIG);
		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
		nw64_mac(XMAC_CONFIG, val);

		niu_xpcs_reset(np);

		val = nr64_xpcs(XPCS_CONTROL1);
		if (lp->loopback_mode == LOOPBACK_PHY)
			val |= XPCS_CONTROL1_LOOPBACK;
		else
			val &= ~XPCS_CONTROL1_LOOPBACK;
		nw64_xpcs(XPCS_CONTROL1, val);

		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
		break;

	case NIU_FLAGS_XCVR_SERDES:
		/* 1G SERDES */
		niu_pcs_mii_reset(np);
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		break;

	case 0:
		/* 1G copper */
	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		/* 1G RGMII FIBER */
		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
		niu_pcs_mii_reset(np);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
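/* TX MAC reset: the XMAC variant can use the generic
 * niu_set_and_wait_clear_mac() helper because its reset bits are
 * self-clearing in a single register, while the BMAC variant
 * open-codes an equivalent bounded poll loop.
 */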
static int niu_reset_tx_xmac(struct niu *np)
{
	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
					  (XTXMAC_SW_RST_REG_RS |
					   XTXMAC_SW_RST_SOFT_RST),
					  1000, 100, "XTXMAC_SW_RST");
}

static int niu_reset_tx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_tx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_tx_xmac(np);
	else
		return niu_reset_tx_bmac(np);
}
static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	val = nr64_mac(XMAC_MIN);
	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
		 XMAC_MIN_RX_MIN_PKT_SIZE);
	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
	nw64_mac(XMAC_MIN, val);

	nw64_mac(XMAC_MAX, max);

	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);

	val = nr64_mac(XMAC_IPG);
	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_IPG_IPG_XGMII;
		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
	} else {
		val &= ~XMAC_IPG_IPG_MII_GMII;
		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
	}
	nw64_mac(XMAC_IPG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
		 XMAC_CONFIG_STRETCH_MODE |
		 XMAC_CONFIG_VAR_MIN_IPG_EN |
		 XMAC_CONFIG_TX_ENABLE);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(TXMAC_FRM_CNT, 0);
	nw64_mac(TXMAC_BYTE_CNT, 0);
}
static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	nw64_mac(BMAC_MIN_FRAME, min);
	nw64_mac(BMAC_MAX_FRAME, max);

	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
	nw64_mac(BMAC_PREAMBLE_SIZE, 7);

	val = nr64_mac(BTXMAC_CONFIG);
	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
		 BTXMAC_CONFIG_ENABLE);
	nw64_mac(BTXMAC_CONFIG, val);
}
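/* Frame size selection for the TX MAC: 64 is the classic Ethernet
 * minimum frame size, 1522 covers a 1500-byte MTU frame plus the
 * Ethernet header, a VLAN tag and the FCS, and 9216 is the jumbo
 * limit used once the configured MTU exceeds ETH_DATA_LEN.
 */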
static void niu_init_tx_mac(struct niu *np)
{
	u64 min, max;

	min = 64;
	if (np->dev->mtu > ETH_DATA_LEN)
		max = 9216;
	else
		max = 1522;

	/* The XMAC_MIN register only accepts values for TX min which
	 * have the low 3 bits cleared.
	 */
	BUG_ON(min & 0x7);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_tx_xmac(np, min, max);
	else
		niu_init_tx_bmac(np, min, max);
}
static int niu_reset_rx_xmac(struct niu *np)
{
	int limit;

	nw64_mac(XRXMAC_SW_RST,
		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
						 XRXMAC_SW_RST_SOFT_RST)))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_rx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_rx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_rx_xmac(np);
	else
		return niu_reset_rx_bmac(np);
}
static void niu_init_rx_xmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(XMAC_ADD_FILT0, 0);
	nw64_mac(XMAC_ADD_FILT1, 0);
	nw64_mac(XMAC_ADD_FILT2, 0);
	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(XMAC_HASH_TBL(i), 0);
	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
		 XMAC_CONFIG_PROMISCUOUS |
		 XMAC_CONFIG_PROMISC_GROUP |
		 XMAC_CONFIG_ERR_CHK_DIS |
		 XMAC_CONFIG_RX_CRC_CHK_DIS |
		 XMAC_CONFIG_RESERVED_MULTICAST |
		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
		 XMAC_CONFIG_ADDR_FILTER_EN |
		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
		 XMAC_CONFIG_STRIP_CRC |
		 XMAC_CONFIG_PASS_FLOW_CTRL |
		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
	val |= (XMAC_CONFIG_HASH_FILTER_EN);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(RXMAC_BT_CNT, 0);
	nw64_mac(RXMAC_BC_FRM_CNT, 0);
	nw64_mac(RXMAC_MC_FRM_CNT, 0);
	nw64_mac(RXMAC_FRAG_CNT, 0);
	nw64_mac(RXMAC_HIST_CNT1, 0);
	nw64_mac(RXMAC_HIST_CNT2, 0);
	nw64_mac(RXMAC_HIST_CNT3, 0);
	nw64_mac(RXMAC_HIST_CNT4, 0);
	nw64_mac(RXMAC_HIST_CNT5, 0);
	nw64_mac(RXMAC_HIST_CNT6, 0);
	nw64_mac(RXMAC_HIST_CNT7, 0);
	nw64_mac(RXMAC_MPSZER_CNT, 0);
	nw64_mac(RXMAC_CRC_ER_CNT, 0);
	nw64_mac(RXMAC_CD_VIO_CNT, 0);
	nw64_mac(LINK_FAULT_CNT, 0);
}
static void niu_init_rx_bmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(BMAC_ADD_FILT0, 0);
	nw64_mac(BMAC_ADD_FILT1, 0);
	nw64_mac(BMAC_ADD_FILT2, 0);
	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(BMAC_HASH_TBL(i), 0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);

	val = nr64_mac(BRXMAC_CONFIG);
	val &= ~(BRXMAC_CONFIG_ENABLE |
		 BRXMAC_CONFIG_STRIP_PAD |
		 BRXMAC_CONFIG_STRIP_FCS |
		 BRXMAC_CONFIG_PROMISC |
		 BRXMAC_CONFIG_PROMISC_GRP |
		 BRXMAC_CONFIG_ADDR_FILT_EN |
		 BRXMAC_CONFIG_DISCARD_DIS);
	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
	nw64_mac(BRXMAC_CONFIG, val);

	val = nr64_mac(BMAC_ADDR_CMPEN);
	val |= BMAC_ADDR_CMPEN_EN0;
	nw64_mac(BMAC_ADDR_CMPEN, val);
}
static void niu_init_rx_mac(struct niu *np)
{
	niu_set_primary_mac(np, np->dev->dev_addr);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_rx_xmac(np);
	else
		niu_init_rx_bmac(np);
}
static void niu_enable_tx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	if (on)
		val |= XMAC_CONFIG_TX_ENABLE;
	else
		val &= ~XMAC_CONFIG_TX_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_tx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BTXMAC_CONFIG);

	if (on)
		val |= BTXMAC_CONFIG_ENABLE;
	else
		val &= ~BTXMAC_CONFIG_ENABLE;
	nw64_mac(BTXMAC_CONFIG, val);
}

static void niu_enable_tx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_tx_xmac(np, on);
	else
		niu_enable_tx_bmac(np, on);
}
static void niu_enable_rx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
		 XMAC_CONFIG_PROMISCUOUS);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= XMAC_CONFIG_HASH_FILTER_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= XMAC_CONFIG_PROMISCUOUS;

	if (on)
		val |= XMAC_CONFIG_RX_MAC_ENABLE;
	else
		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_rx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BRXMAC_CONFIG);

	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
		 BRXMAC_CONFIG_PROMISC);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= BRXMAC_CONFIG_HASH_FILT_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= BRXMAC_CONFIG_PROMISC;

	if (on)
		val |= BRXMAC_CONFIG_ENABLE;
	else
		val &= ~BRXMAC_CONFIG_ENABLE;
	nw64_mac(BRXMAC_CONFIG, val);
}

static void niu_enable_rx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_rx_xmac(np, on);
	else
		niu_enable_rx_bmac(np, on);
}
static int niu_init_mac(struct niu *np)
{
	int err;

	niu_init_xif(np);
	err = niu_init_pcs(np);
	if (err)
		return err;

	err = niu_reset_tx_mac(np);
	if (err)
		return err;
	niu_init_tx_mac(np);
	err = niu_reset_rx_mac(np);
	if (err)
		return err;
	niu_init_rx_mac(np);

	/* This looks hookey but the RX MAC reset we just did will
	 * undo some of the state we setup in niu_init_tx_mac() so we
	 * have to call it again.  In particular, the RX MAC reset will
	 * set the XMAC_MAX register back to its default value.
	 */
	niu_init_tx_mac(np);
	niu_enable_tx_mac(np, 1);

	niu_enable_rx_mac(np, 1);

	return 0;
}
static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_stop(np, rp->tx_channel);
}

static void niu_stop_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_stop_one_tx_channel(np, rp);
	}
}

static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_reset(np, rp->tx_channel);
}

static void niu_reset_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_reset_one_tx_channel(np, rp);
	}
}
static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
}

static void niu_stop_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_stop_one_rx_channel(np, rp);
	}
}

static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int channel = rp->rx_channel;

	(void) niu_rx_channel_reset(np, channel);
	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
	nw64(RX_DMA_CTL_STAT(channel), 0);
	(void) niu_enable_rx_channel(np, channel, 0);
}

static void niu_reset_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_reset_one_rx_channel(np, rp);
	}
}
static void niu_disable_ipp(struct niu *np)
{
	u64 rd, wr, val;
	int limit;

	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	limit = 100;
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &= ~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
		 IPP_CFIG_CKSUM_EN);
	nw64_ipp(IPP_CFIG, val);

	(void) niu_ipp_reset(np);
}
static int niu_init_hw(struct niu *np)
{
	int i, err;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
		if (err)
			return err;
	}

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

out_uninit_ipp:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
	niu_disable_ipp(np);

out_uninit_rx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);

	return err;
}
static void niu_stop_hw(struct niu *np)
{
	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
	niu_enable_interrupts(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
	niu_enable_rx_mac(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
	niu_disable_ipp(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
	niu_stop_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
	niu_stop_rx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
	niu_reset_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
	niu_reset_rx_channels(np);
}
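/* Interrupt naming: LDG 0 always carries the "<dev>:MAC" interrupt;
 * port 0 additionally owns the chip-global MIF and SYSERR sources.
 * The remaining logical device groups are named after the RX rings
 * first and the TX rings after them.
 */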
static void niu_set_irq_name(struct niu *np)
{
	int port = np->port;
	int i, j = 1;

	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
	if (port == 0) {
		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
		j = 3;
	}

	for (i = 0; i < np->num_ldg - j; i++) {
		if (i < np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-rx-%d",
				np->dev->name, i);
		else if (i < np->num_tx_rings + np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
				i - np->num_rx_rings);
	}
}
static int niu_request_irq(struct niu *np)
{
	int i, j, err;

	niu_set_irq_name(np);

	err = 0;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
				  np->irq_name[i], lp);
		if (err)
			goto out_free_irqs;

	}

	return 0;

out_free_irqs:
	for (j = 0; j < i; j++) {
		struct niu_ldg *lp = &np->ldg[j];

		free_irq(lp->irq, lp);
	}
	return err;
}

static void niu_free_irq(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		free_irq(lp->irq, lp);
	}
}
static void niu_enable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_enable(&np->ldg[i].napi);
}

static void niu_disable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_disable(&np->ldg[i].napi);
}
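/* Bring-up order in niu_open(): allocate the DMA channels, quiesce
 * interrupts, register the IRQ handlers and enable NAPI before the
 * hardware is initialized under np->lock; the periodic link timer is
 * only armed once niu_init_hw() and the final interrupt enable have
 * both succeeded.
 */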
static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);
	if (err)
		goto out_err;

	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	niu_enable_napi(np);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		timer_setup(&np->timer, niu_timer, 0);
		np->timer.expires = jiffies + HZ;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (err) {
		niu_free_irq(np);
		niu_disable_napi(np);
		goto out_free_channels;
	}

	netif_tx_start_all_queues(dev);

	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

	return 0;

out_free_channels:
	niu_free_channels(np);

out_err:
	return err;
}
static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
}

static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);

	niu_free_irq(np);

	niu_free_channels(np);

	niu_handle_led(np, 0);

	return 0;
}
static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}

static void niu_sync_bmac_stats(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;

	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);

	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
}

static void niu_sync_mac_stats(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_sync_xmac_stats(np);
	else
		niu_sync_bmac_stats(np);
}
static void niu_get_rx_stats(struct niu *np,
			     struct rtnl_link_stats64 *stats)
{
	u64 pkts, dropped, errors, bytes;
	struct rx_ring_info *rx_rings;
	int i;

	pkts = dropped = errors = bytes = 0;

	rx_rings = READ_ONCE(np->rx_rings);
	if (!rx_rings)
		goto no_rings;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &rx_rings[i];

		niu_sync_rx_discard_stats(np, rp, 0);

		pkts += rp->rx_packets;
		bytes += rp->rx_bytes;
		dropped += rp->rx_dropped;
		errors += rp->rx_errors;
	}

no_rings:
	stats->rx_packets = pkts;
	stats->rx_bytes = bytes;
	stats->rx_dropped = dropped;
	stats->rx_errors = errors;
}

static void niu_get_tx_stats(struct niu *np,
			     struct rtnl_link_stats64 *stats)
{
	u64 pkts, errors, bytes;
	struct tx_ring_info *tx_rings;
	int i;

	pkts = errors = bytes = 0;

	tx_rings = READ_ONCE(np->tx_rings);
	if (!tx_rings)
		goto no_rings;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &tx_rings[i];

		pkts += rp->tx_packets;
		bytes += rp->tx_bytes;
		errors += rp->tx_errors;
	}

no_rings:
	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_errors = errors;
}

static void niu_get_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct niu *np = netdev_priv(dev);

	if (netif_running(dev)) {
		niu_get_rx_stats(np, stats);
		niu_get_tx_stats(np, stats);
	}
}
static void niu_load_hash_xmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash_bmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash(struct niu *np, u16 *hash)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_load_hash_xmac(np, hash);
	else
		niu_load_hash_bmac(np, hash);
}
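/* Multicast filtering uses a 256-bit hash spread over sixteen 16-bit
 * registers: the top eight bits of the little-endian CRC-32 of the
 * address select one of the 256 bits (crc >> 4 picks the register,
 * the low four bits pick the bit within it).
 */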
static void niu_set_rx_mode(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int i, alt_cnt, err;
	struct netdev_hw_addr *ha;
	unsigned long flags;
	u16 hash[16] = { 0, };

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);

	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
	if (dev->flags & IFF_PROMISC)
		np->flags |= NIU_FLAGS_PROMISC;
	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
		np->flags |= NIU_FLAGS_MCAST;

	alt_cnt = netdev_uc_count(dev);
	if (alt_cnt > niu_num_alt_addr(np)) {
		alt_cnt = 0;
		np->flags |= NIU_FLAGS_PROMISC;
	}

	if (alt_cnt) {
		int index = 0;

		netdev_for_each_uc_addr(ha, dev) {
			err = niu_set_alt_mac(np, index, ha->addr);
			if (err)
				netdev_warn(dev, "Error %d adding alt mac %d\n",
					    err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				netdev_warn(dev, "Error %d enabling alt mac %d\n",
					    err, index);

			index++;
		}
	} else {
		int alt_start;

		if (np->flags & NIU_FLAGS_XMAC)
			alt_start = 0;
		else
			alt_start = 1;
		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
			err = niu_enable_alt_mac(np, i, 0);
			if (err)
				netdev_warn(dev, "Error %d disabling alt mac %d\n",
					    err, i);
		}
	}
	if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
	} else if (!netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev) {
			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);

			crc >>= 24;
			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
		}
	}

	if (np->flags & NIU_FLAGS_MCAST)
		niu_load_hash(np, hash);

	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
}
static int niu_set_mac_addr(struct net_device *dev, void *p)
{
	struct niu *np = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned long flags;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	if (!netif_running(dev))
		return 0;

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);
	niu_set_primary_mac(np, dev->dev_addr);
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}
static void niu_netif_stop(struct niu *np)
{
	netif_trans_update(np->dev);	/* prevent tx timeout */

	niu_disable_napi(np);

	netif_tx_disable(np->dev);
}

static void niu_netif_start(struct niu *np)
{
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after niu_init_hw).
	 */
	netif_tx_wake_all_queues(np->dev);

	niu_enable_napi(np);

	niu_enable_interrupts(np, 1);
}
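/* After a reset the RBR contents are rebuilt rather than
 * reallocated: pages still sitting in each ring's rxhash are
 * re-linked into the ring, remaining slots are topped up with
 * freshly allocated pages, and TX rings simply release every
 * pending skb.
 */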
static void niu_reset_buffers(struct niu *np)
{
	int i, j, k, err;

	if (np->rx_rings) {
		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
				struct page *page;

				page = rp->rxhash[j];
				while (page) {
					struct page *next =
						(struct page *) page->mapping;
					u64 base = page->index;

					base = base >> RBR_DESCR_ADDR_SHIFT;
					rp->rbr[k++] = cpu_to_le32(base);
					page = next;
				}
			}
			for (; k < MAX_RBR_RING_SIZE; k++) {
				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
				if (unlikely(err))
					break;
			}

			rp->rbr_index = rp->rbr_table_size - 1;
			rp->rcr_index = 0;
			rp->rbr_pending = 0;
			rp->rbr_refill_pending = 0;
		}
	}
	if (np->tx_rings) {
		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
				if (rp->tx_buffs[j].skb)
					(void) release_tx_packet(np, rp, j);
			}

			rp->pending = MAX_TX_RING_SIZE;
			rp->prod = 0;
			rp->cons = 0;
			rp->wrap_bit = 0;
		}
	}
}
static void niu_reset_task(struct work_struct *work)
{
	struct niu *np = container_of(work, struct niu, reset_task);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);
	if (!netif_running(np->dev)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&np->lock, flags);

	del_timer_sync(&np->timer);

	niu_netif_stop(np);

	spin_lock_irqsave(&np->lock, flags);

	niu_stop_hw(np);

	spin_unlock_irqrestore(&np->lock, flags);

	niu_reset_buffers(np);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);
}

static void niu_tx_timeout(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	dev_err(np->device, "%s: Transmit timed out, resetting\n",
		dev->name);

	schedule_work(&np->reset_task);
}
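/* A TX descriptor is a single little-endian 64-bit word packing the
 * mark/SOP control bits, the number of descriptors in the packet,
 * the transfer length and the DMA start address (SAD), using the
 * shifts and mask below.
 */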
static void niu_set_txd(struct tx_ring_info *rp, int index,
			u64 mapping, u64 len, u64 mark,
			u64 n_frags)
{
	__le64 *desc = &rp->descr[index];

	*desc = cpu_to_le64(mark |
			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
			    (len << TX_DESC_TR_LEN_SHIFT) |
			    (mapping & TX_DESC_SAD));
}
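/* The hardware TX packet header stores its offsets (pad, L3 start,
 * L4 start/stuff) in units of 16-bit words, hence the divisions by
 * two below; all offsets are measured from the end of the tx_pkt_hdr
 * that is prepended to the frame.
 */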
static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
				u64 pad_bytes, u64 len)
{
	u16 eth_proto, eth_proto_inner;
	u64 csum_bits, l3off, ihl, ret;
	u8 ip_proto;
	int ipv6;

	eth_proto = be16_to_cpu(ehdr->h_proto);
	eth_proto_inner = eth_proto;
	if (eth_proto == ETH_P_8021Q) {
		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
		__be16 val = vp->h_vlan_encapsulated_proto;

		eth_proto_inner = be16_to_cpu(val);
	}

	ipv6 = ihl = 0;
	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		ihl = ip_hdr(skb)->ihl;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		ihl = (40 >> 2);
		ipv6 = 1;
		break;
	default:
		ip_proto = ihl = 0;
		break;
	}

	csum_bits = TXHDR_CSUM_NONE;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u64 start, stuff;

		csum_bits = (ip_proto == IPPROTO_TCP ?
			     TXHDR_CSUM_TCP :
			     (ip_proto == IPPROTO_UDP ?
			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));

		start = skb_checksum_start_offset(skb) -
			(pad_bytes + sizeof(struct tx_pkt_hdr));
		stuff = start + skb->csum_offset;

		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
	}

	l3off = skb_network_offset(skb) -
		(pad_bytes + sizeof(struct tx_pkt_hdr));

	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
	       (len << TXHDR_LEN_SHIFT) |
	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
	       (ihl << TXHDR_IHL_SHIFT) |
	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
	       (ipv6 ? TXHDR_IP_VER : 0) |
	       csum_bits);

	return ret;
}
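/* niu_start_xmit() prepends a tx_pkt_hdr in the skb headroom.  The
 * header is placed so that the hardware sees the frame at its
 * original 16-byte alignment: "align" is the misalignment of
 * skb->data and is folded into the pad that niu_compute_tx_flags()
 * accounts for in the header flags.
 */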
static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	unsigned long align, headroom;
	struct netdev_queue *txq;
	struct tx_ring_info *rp;
	struct tx_pkt_hdr *tp;
	unsigned int len, nfg;
	struct ethhdr *ehdr;
	int prod, i, tlen;
	u64 mapping, mrk;

	i = skb_get_queue_mapping(skb);
	rp = &np->tx_rings[i];
	txq = netdev_get_tx_queue(dev, i);

	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_tx_stop_queue(txq);
		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
		rp->tx_errors++;
		return NETDEV_TX_BUSY;
	}

	if (eth_skb_pad(skb))
		goto out;

	len = sizeof(struct tx_pkt_hdr) + 15;
	if (skb_headroom(skb) < len) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, len);
		if (!skb_new)
			goto out_drop;
		kfree_skb(skb);
		skb = skb_new;
	} else
		skb_orphan(skb);

	align = ((unsigned long) skb->data & (16 - 1));
	headroom = align + sizeof(struct tx_pkt_hdr);

	ehdr = (struct ethhdr *) skb->data;
	tp = skb_push(skb, headroom);

	len = skb->len - sizeof(struct tx_pkt_hdr);
	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
	tp->resv = 0;

	len = skb_headlen(skb);
	mapping = np->ops->map_single(np->device, skb->data,
				      len, DMA_TO_DEVICE);

	prod = rp->prod;

	rp->tx_buffs[prod].skb = skb;
	rp->tx_buffs[prod].mapping = mapping;

	mrk = TX_DESC_SOP;
	if (++rp->mark_counter == rp->mark_freq) {
		rp->mark_counter = 0;
		mrk |= TX_DESC_MARK;
		rp->mark_pending++;
	}

	tlen = len;
	nfg = skb_shinfo(skb)->nr_frags;
	while (tlen > 0) {
		tlen -= MAX_TX_DESC_LEN;
		nfg++;
	}

	while (len > 0) {
		unsigned int this_len = len;

		if (this_len > MAX_TX_DESC_LEN)
			this_len = MAX_TX_DESC_LEN;

		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
		mrk = nfg = 0;

		prod = NEXT_TX(rp, prod);
		mapping += this_len;
		len -= this_len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
					    frag->page_offset, len,
					    DMA_TO_DEVICE);

		rp->tx_buffs[prod].skb = NULL;
		rp->tx_buffs[prod].mapping = mapping;

		niu_set_txd(rp, prod, mapping, len, 0, 0);

		prod = NEXT_TX(rp, prod);
	}

	if (prod < rp->prod)
		rp->wrap_bit ^= TX_RING_KICK_WRAP;
	rp->prod = prod;

	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));

	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);
		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
			netif_tx_wake_queue(txq);
	}

out:
	return NETDEV_TX_OK;

out_drop:
	rp->tx_errors++;
	kfree_skb(skb);
	goto out;
}
static int niu_change_mtu(struct net_device *dev, int new_mtu)
{
	struct niu *np = netdev_priv(dev);
	int err, orig_jumbo, new_jumbo;

	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
	new_jumbo = (new_mtu > ETH_DATA_LEN);

	dev->mtu = new_mtu;

	if (!netif_running(dev) ||
	    (orig_jumbo == new_jumbo))
		return 0;

	niu_full_shutdown(np, dev);

	niu_free_channels(np);

	niu_enable_napi(np);

	err = niu_alloc_channels(np);
	if (err)
		return err;

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		timer_setup(&np->timer, niu_timer, 0);
		np->timer.expires = jiffies + HZ;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (!err) {
		netif_tx_start_all_queues(dev);
		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
			netif_carrier_on(dev);

		add_timer(&np->timer);
	}

	return err;
}
static void niu_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct niu *np = netdev_priv(dev);
	struct niu_vpd *vpd = &np->vpd;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
		 vpd->fcode_major, vpd->fcode_minor);
	if (np->parent->plat_type != PLAT_TYPE_NIU)
		strlcpy(info->bus_info, pci_name(np->pdev),
			sizeof(info->bus_info));
}
static int niu_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp;

	lp = &np->link_config;

	memset(cmd, 0, sizeof(*cmd));
	cmd->base.phy_address = np->phy_addr;
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						lp->supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						lp->active_advertising);
	cmd->base.autoneg = lp->active_autoneg;
	cmd->base.speed = lp->active_speed;
	cmd->base.duplex = lp->active_duplex;
	cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;

	return 0;
}

static int niu_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp = &np->link_config;

	ethtool_convert_link_mode_to_legacy_u32(&lp->advertising,
						cmd->link_modes.advertising);
	lp->speed = cmd->base.speed;
	lp->duplex = cmd->base.duplex;
	lp->autoneg = cmd->base.autoneg;
	return niu_init_link(np);
}
static u32 niu_get_msglevel(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	return np->msg_enable;
}

static void niu_set_msglevel(struct net_device *dev, u32 value)
{
	struct niu *np = netdev_priv(dev);
	np->msg_enable = value;
}

static int niu_nway_reset(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	if (np->link_config.autoneg)
		return niu_init_link(np);

	return 0;
}

static int niu_get_eeprom_len(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	return np->eeprom_len;
}
static int niu_get_eeprom(struct net_device *dev,
			  struct ethtool_eeprom *eeprom, u8 *data)
{
	struct niu *np = netdev_priv(dev);
	u32 offset, len, val;

	offset = eeprom->offset;
	len = eeprom->len;

	if (offset + len < offset)
		return -EINVAL;
	if (offset >= np->eeprom_len)
		return -EINVAL;
	if (offset + len > np->eeprom_len)
		len = eeprom->len = np->eeprom_len - offset;

	if (offset & 3) {
		u32 b_offset, b_count;

		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len)
			b_count = len;

		val = nr64(ESPC_NCR((offset - b_offset) / 4));
		memcpy(data, ((char *)&val) + b_offset, b_count);
		data += b_count;
		len -= b_count;
		offset += b_count;
	}
	while (len >= 4) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, 4);
		data += 4;
		len -= 4;
		offset += 4;
	}
	if (len) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, len);
	}
	return 0;
}
static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		*pid = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		*pid = IPPROTO_UDP;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		*pid = IPPROTO_SCTP;
		break;
	case AH_V4_FLOW:
	case AH_V6_FLOW:
		*pid = IPPROTO_AH;
		break;
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		*pid = IPPROTO_ESP;
		break;
	default:
		*pid = 0;
		break;
	}
}

static int niu_class_to_ethflow(u64 class, int *flow_type)
{
	switch (class) {
	case CLASS_CODE_TCP_IPV4:
		*flow_type = TCP_V4_FLOW;
		break;
	case CLASS_CODE_UDP_IPV4:
		*flow_type = UDP_V4_FLOW;
		break;
	case CLASS_CODE_AH_ESP_IPV4:
		*flow_type = AH_V4_FLOW;
		break;
	case CLASS_CODE_SCTP_IPV4:
		*flow_type = SCTP_V4_FLOW;
		break;
	case CLASS_CODE_TCP_IPV6:
		*flow_type = TCP_V6_FLOW;
		break;
	case CLASS_CODE_UDP_IPV6:
		*flow_type = UDP_V6_FLOW;
		break;
	case CLASS_CODE_AH_ESP_IPV6:
		*flow_type = AH_V6_FLOW;
		break;
	case CLASS_CODE_SCTP_IPV6:
		*flow_type = SCTP_V6_FLOW;
		break;
	case CLASS_CODE_USER_PROG1:
	case CLASS_CODE_USER_PROG2:
	case CLASS_CODE_USER_PROG3:
	case CLASS_CODE_USER_PROG4:
		*flow_type = IP_USER_FLOW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int niu_ethflow_to_class(int flow_type, u64 *class)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		*class = CLASS_CODE_TCP_IPV4;
		break;
	case UDP_V4_FLOW:
		*class = CLASS_CODE_UDP_IPV4;
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		*class = CLASS_CODE_AH_ESP_IPV4;
		break;
	case SCTP_V4_FLOW:
		*class = CLASS_CODE_SCTP_IPV4;
		break;
	case TCP_V6_FLOW:
		*class = CLASS_CODE_TCP_IPV6;
		break;
	case UDP_V6_FLOW:
		*class = CLASS_CODE_UDP_IPV6;
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		*class = CLASS_CODE_AH_ESP_IPV6;
		break;
	case SCTP_V6_FLOW:
		*class = CLASS_CODE_SCTP_IPV6;
		break;
	default:
		return 0;
	}

	return 1;
}
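/* The two helpers below translate bit-for-bit between the ethtool
 * RXH_* hash field flags and the NIU FLOW_KEY_* register bits; the
 * L4 byte pairs additionally need the per-pair shift applied.
 */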
static u64 niu_flowkey_to_ethflow(u64 flow_key)
{
	u64 ethflow = 0;

	if (flow_key & FLOW_KEY_L2DA)
		ethflow |= RXH_L2DA;
	if (flow_key & FLOW_KEY_VLAN)
		ethflow |= RXH_VLAN;
	if (flow_key & FLOW_KEY_IPSA)
		ethflow |= RXH_IP_SRC;
	if (flow_key & FLOW_KEY_IPDA)
		ethflow |= RXH_IP_DST;
	if (flow_key & FLOW_KEY_PROTO)
		ethflow |= RXH_L3_PROTO;
	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
		ethflow |= RXH_L4_B_0_1;
	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
		ethflow |= RXH_L4_B_2_3;

	return ethflow;
}

static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
{
	u64 key = 0;

	if (ethflow & RXH_L2DA)
		key |= FLOW_KEY_L2DA;
	if (ethflow & RXH_VLAN)
		key |= FLOW_KEY_VLAN;
	if (ethflow & RXH_IP_SRC)
		key |= FLOW_KEY_IPSA;
	if (ethflow & RXH_IP_DST)
		key |= FLOW_KEY_IPDA;
	if (ethflow & RXH_L3_PROTO)
		key |= FLOW_KEY_PROTO;
	if (ethflow & RXH_L4_B_0_1)
		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
	if (ethflow & RXH_L4_B_2_3)
		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);

	*flow_key = key;

	return 1;
}
static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
{
	u64 class;

	nfc->data = 0;

	if (!niu_ethflow_to_class(nfc->flow_type, &class))
		return -EINVAL;

	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
	    TCAM_KEY_DISC)
		nfc->data = RXH_DISCARD;
	else
		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
						      CLASS_CODE_USER_PROG1]);
	return 0;
}
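/* TCAM IPv4 key layout, as used below: key[0] carries the class
 * code, key[1] the L2 RDC table number, key[2] packs TOS, protocol
 * and the L4 ports (or SPI), and key[3] the source and destination
 * addresses.  key_mask[] mirrors the same layout.
 */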
static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
					struct ethtool_rx_flow_spec *fsp)
{
	u32 tmp;
	u16 prt;

	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);

	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);

	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);

	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);

	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
		TCAM_V4KEY2_TOS_SHIFT;
	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
		TCAM_V4KEY2_TOS_SHIFT;

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);

		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);

		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);

		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);

		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
		break;
	case IP_USER_FLOW:
		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);

		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);

		fsp->h_u.usr_ip4_spec.proto =
			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;
		fsp->m_u.usr_ip4_spec.proto =
			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;

		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		break;
	default:
		break;
	}
}
static int niu_get_ethtool_tcam_entry(struct niu *np,
				      struct ethtool_rxnfc *nfc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	u16 idx;
	u64 class;
	int ret = 0;

	idx = tcam_get_index(np, (u16)nfc->fs.location);

	tp = &parent->tcam[idx];
	if (!tp->valid) {
		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
			    parent->index, (u16)nfc->fs.location, idx);
		return -EINVAL;
	}

	/* fill the flow spec entry */
	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
		TCAM_V4KEY0_CLASS_CODE_SHIFT;
	ret = niu_class_to_ethflow(class, &fsp->flow_type);
	if (ret < 0) {
		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
			    parent->index);
		goto out;
	}

	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;
		if (proto == IPPROTO_ESP) {
			if (fsp->flow_type == AH_V4_FLOW)
				fsp->flow_type = ESP_V4_FLOW;
			else
				fsp->flow_type = ESP_V6_FLOW;
		}
	}

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		niu_get_ip4fs_from_tcam_key(tp, fsp);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		/* Not yet implemented */
		ret = -EINVAL;
		break;
	case IP_USER_FLOW:
		niu_get_ip4fs_from_tcam_key(tp, fsp);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret < 0)
		goto out;

	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
			TCAM_ASSOCDATA_OFFSET_SHIFT;

	/* put the tcam size here */
	nfc->data = tcam_get_size(np);
out:
	return ret;
}
static int niu_get_ethtool_tcam_all(struct niu *np,
				    struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	int i, idx, cnt;
	unsigned long flags;
	int ret = 0;

	/* put the tcam size here */
	nfc->data = tcam_get_size(np);

	niu_lock_parent(np, flags);
	for (cnt = 0, i = 0; i < nfc->data; i++) {
		idx = tcam_get_index(np, i);
		tp = &parent->tcam[idx];
		if (!tp->valid)
			continue;
		if (cnt == nfc->rule_cnt) {
			ret = -EMSGSIZE;
			break;
		}
		rule_locs[cnt] = i;
		cnt++;
	}
	niu_unlock_parent(np, flags);

	nfc->rule_cnt = cnt;

	return ret;
}
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
		       u32 *rule_locs)
{
	struct niu *np = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = niu_get_hash_opts(np, cmd);
		break;
	case ETHTOOL_GRXRINGS:
		cmd->data = np->num_rx_rings;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = niu_get_ethtool_tcam_entry(np, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
{
	u64 class;
	u64 flow_key = 0;
	unsigned long flags;

	if (!niu_ethflow_to_class(nfc->flow_type, &class))
		return -EINVAL;

	if (class < CLASS_CODE_USER_PROG1 ||
	    class > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	if (nfc->data & RXH_DISCARD) {
		niu_lock_parent(np, flags);
		flow_key = np->parent->tcam_key[class -
					       CLASS_CODE_USER_PROG1];
		flow_key |= TCAM_KEY_DISC;
		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
		niu_unlock_parent(np, flags);
		return 0;
	} else {
		/* Discard was set before, but is not set now */
		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
		    TCAM_KEY_DISC) {
			niu_lock_parent(np, flags);
			flow_key = np->parent->tcam_key[class -
					       CLASS_CODE_USER_PROG1];
			flow_key &= ~TCAM_KEY_DISC;
			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
			     flow_key);
			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
				flow_key;
			niu_unlock_parent(np, flags);
		}
	}

	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
		return -EINVAL;

	niu_lock_parent(np, flags);
	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
	niu_unlock_parent(np, flags);

	return 0;
}
static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
				       struct niu_tcam_entry *tp,
				       int l2_rdc_tab, u64 class)
{
	u8 pid = 0;
	u32 sip, dip, sipm, dipm, spi, spim;
	u16 sport, dport, spm, dpm;

	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);

	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;

	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
	tp->key[3] |= dip;

	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
	tp->key_mask[3] |= dipm;

	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
		       TCAM_V4KEY2_TOS_SHIFT);
	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
			    TCAM_V4KEY2_TOS_SHIFT);
	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);

		tp->key[2] |= (((u64)sport << 16) | dport);
		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);

		tp->key[2] |= spi;
		tp->key_mask[2] |= spim;
		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
		break;
	case IP_USER_FLOW:
		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);

		tp->key[2] |= spi;
		tp->key_mask[2] |= spim;
		pid = fsp->h_u.usr_ip4_spec.proto;
		break;
	default:
		break;
	}

	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
	if (pid)
		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
}
static int niu_add_ethtool_tcam_entry(struct niu *np,
				      struct ethtool_rxnfc *nfc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
	int l2_rdc_table = rdc_table->first_table_num;
	u16 idx;
	u64 class;
	unsigned long flags;
	int err, ret;

	ret = 0;

	idx = nfc->fs.location;
	if (idx >= tcam_get_size(np))
		return -EINVAL;

	if (fsp->flow_type == IP_USER_FLOW) {
		int i;
		int add_usr_cls = 0;
		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;

		if (uspec->ip_ver != ETH_RX_NFC_IP4)
			return -EINVAL;

		niu_lock_parent(np, flags);

		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
			if (parent->l3_cls[i]) {
				if (uspec->proto == parent->l3_cls_pid[i]) {
					class = parent->l3_cls[i];
					parent->l3_cls_refcnt[i]++;
					add_usr_cls = 1;
					break;
				}
			} else {
				/* Program new user IP class */
				switch (i) {
				case 0:
					class = CLASS_CODE_USER_PROG1;
					break;
				case 1:
					class = CLASS_CODE_USER_PROG2;
					break;
				case 2:
					class = CLASS_CODE_USER_PROG3;
					break;
				case 3:
					class = CLASS_CODE_USER_PROG4;
					break;
				default:
					class = CLASS_CODE_UNRECOG;
					break;
				}
				ret = tcam_user_ip_class_set(np, class, 0,
							     uspec->proto,
							     uspec->tos,
							     umask->tos);
				if (ret)
					goto out;

				ret = tcam_user_ip_class_enable(np, class, 1);
				if (ret)
					goto out;
				parent->l3_cls[i] = class;
				parent->l3_cls_pid[i] = uspec->proto;
				parent->l3_cls_refcnt[i]++;
				add_usr_cls = 1;
				break;
			}
		}
		if (!add_usr_cls) {
			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
				    parent->index, __func__, uspec->proto);
			ret = -EINVAL;
			goto out;
		}
		niu_unlock_parent(np, flags);
	} else {
		if (!niu_ethflow_to_class(fsp->flow_type, &class))
			return -EINVAL;
	}

	niu_lock_parent(np, flags);

	idx = tcam_get_index(np, idx);
	tp = &parent->tcam[idx];

	memset(tp, 0, sizeof(*tp));

	/* fill in the tcam key and mask */
	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		/* Not yet implemented */
		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
			    parent->index, __func__, fsp->flow_type);
		ret = -EINVAL;
		goto out;
	case IP_USER_FLOW:
		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
		break;
	default:
		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
			    parent->index, __func__, fsp->flow_type);
		ret = -EINVAL;
		goto out;
	}

	/* fill in the assoc data */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		tp->assoc_data = TCAM_ASSOCDATA_DISC;
	} else {
		if (fsp->ring_cookie >= np->num_rx_rings) {
			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
				    parent->index, __func__,
				    (long long)fsp->ring_cookie);
			ret = -EINVAL;
			goto out;
		}
		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
				  (fsp->ring_cookie <<
				   TCAM_ASSOCDATA_OFFSET_SHIFT));
	}

	err = tcam_write(np, idx, tp->key, tp->key_mask);
	if (err) {
		ret = -EINVAL;
		goto out;
	}
	err = tcam_assoc_write(np, idx, tp->assoc_data);
	if (err) {
		ret = -EINVAL;
		goto out;
	}

	/* validate the entry */
	tp->valid = 1;
	np->clas.tcam_valid_entries++;
out:
	niu_unlock_parent(np, flags);

	return ret;
}
static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	u16 idx;
	unsigned long flags;
	u64 class;
	int ret = 0;

	if (loc >= tcam_get_size(np))
		return -EINVAL;

	niu_lock_parent(np, flags);

	idx = tcam_get_index(np, loc);
	tp = &parent->tcam[idx];

	/* if the entry is of a user defined class, then update*/
	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
		TCAM_V4KEY0_CLASS_CODE_SHIFT;

	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
		int i;
		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
			if (parent->l3_cls[i] == class) {
				parent->l3_cls_refcnt[i]--;
				if (!parent->l3_cls_refcnt[i]) {
					/* disable class */
					ret = tcam_user_ip_class_enable(np,
									class,
									0);
					if (ret)
						goto out;
					parent->l3_cls[i] = 0;
					parent->l3_cls_pid[i] = 0;
				}
				break;
			}
		}
		if (i == NIU_L3_PROG_CLS) {
			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
				    parent->index, __func__,
				    (unsigned long long)class);
			ret = -EINVAL;
			goto out;
		}
	}

	ret = tcam_flush(np, idx);
	if (ret)
		goto out;

	/* invalidate the entry */
	tp->valid = 0;
	np->clas.tcam_valid_entries--;
out:
	niu_unlock_parent(np, flags);

	return ret;
}
static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct niu *np = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = niu_set_hash_opts(np, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		ret = niu_add_ethtool_tcam_entry(np, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_xmac_stat_keys[] = {
	{ "tx_frames" },
	{ "tx_bytes" },
	{ "tx_fifo_errors" },
	{ "tx_overflow_errors" },
	{ "tx_max_pkt_size_errors" },
	{ "tx_underflow_errors" },
	{ "rx_local_faults" },
	{ "rx_remote_faults" },
	{ "rx_link_faults" },
	{ "rx_align_errors" },
	{ "rx_frags" },
	{ "rx_mcasts" },
	{ "rx_bcasts" },
	{ "rx_hist_cnt1" },
	{ "rx_hist_cnt2" },
	{ "rx_hist_cnt3" },
	{ "rx_hist_cnt4" },
	{ "rx_hist_cnt5" },
	{ "rx_hist_cnt6" },
	{ "rx_hist_cnt7" },
	{ "rx_octets" },
	{ "rx_code_violations" },
	{ "rx_len_errors" },
	{ "rx_crc_errors" },
	{ "rx_underflows" },
	{ "rx_overflows" },
	{ "pause_off_state" },
	{ "pause_on_state" },
	{ "pause_received" },
};

#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_bmac_stat_keys[] = {
	{ "tx_underflow_errors" },
	{ "tx_max_pkt_size_errors" },
	{ "tx_bytes" },
	{ "tx_frames" },
	{ "rx_overflows" },
	{ "rx_frames" },
	{ "rx_align_errors" },
	{ "rx_crc_errors" },
	{ "rx_len_errors" },
	{ "pause_off_state" },
	{ "pause_on_state" },
	{ "pause_received" },
};

#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_rxchan_stat_keys[] = {
	{ "rx_channel" },
	{ "rx_packets" },
	{ "rx_bytes" },
	{ "rx_dropped" },
	{ "rx_errors" },
};

#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_txchan_stat_keys[] = {
	{ "tx_channel" },
	{ "tx_packets" },
	{ "tx_bytes" },
	{ "tx_errors" },
};

#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct niu *np = netdev_priv(dev);
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	if (np->flags & NIU_FLAGS_XMAC) {
		memcpy(data, niu_xmac_stat_keys,
		       sizeof(niu_xmac_stat_keys));
		data += sizeof(niu_xmac_stat_keys);
	} else {
		memcpy(data, niu_bmac_stat_keys,
		       sizeof(niu_bmac_stat_keys));
		data += sizeof(niu_bmac_stat_keys);
	}
	for (i = 0; i < np->num_rx_rings; i++) {
		memcpy(data, niu_rxchan_stat_keys,
		       sizeof(niu_rxchan_stat_keys));
		data += sizeof(niu_rxchan_stat_keys);
	}
	for (i = 0; i < np->num_tx_rings; i++) {
		memcpy(data, niu_txchan_stat_keys,
		       sizeof(niu_txchan_stat_keys));
		data += sizeof(niu_txchan_stat_keys);
	}
}

static int niu_get_sset_count(struct net_device *dev, int stringset)
{
	struct niu *np = netdev_priv(dev);

	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return (np->flags & NIU_FLAGS_XMAC ?
		 NUM_XMAC_STAT_KEYS :
		 NUM_BMAC_STAT_KEYS) +
		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
}
static void niu_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct niu *np = netdev_priv(dev);
	int i;

	niu_sync_mac_stats(np);
	if (np->flags & NIU_FLAGS_XMAC) {
		memcpy(data, &np->mac_stats.xmac,
		       sizeof(struct niu_xmac_stats));
		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
	} else {
		memcpy(data, &np->mac_stats.bmac,
		       sizeof(struct niu_bmac_stats));
		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
	}
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_sync_rx_discard_stats(np, rp, 0);

		data[0] = rp->rx_channel;
		data[1] = rp->rx_packets;
		data[2] = rp->rx_bytes;
		data[3] = rp->rx_dropped;
		data[4] = rp->rx_errors;
		data += 5;
	}
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		data[0] = rp->tx_channel;
		data[1] = rp->tx_packets;
		data[2] = rp->tx_bytes;
		data[3] = rp->tx_errors;
		data += 4;
	}
}
static u64 niu_led_state_save(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return nr64_mac(XMAC_CONFIG);
	else
		return nr64_mac(BMAC_XIF_CONFIG);
}

static void niu_led_state_restore(struct niu *np, u64 val)
{
	if (np->flags & NIU_FLAGS_XMAC)
		nw64_mac(XMAC_CONFIG, val);
	else
		nw64_mac(BMAC_XIF_CONFIG, val);
}

static void niu_force_led(struct niu *np, int on)
{
	u64 val, reg, bit;

	if (np->flags & NIU_FLAGS_XMAC) {
		reg = XMAC_CONFIG;
		bit = XMAC_CONFIG_FORCE_LED_ON;
	} else {
		reg = BMAC_XIF_CONFIG;
		bit = BMAC_XIF_CONFIG_LINK_LED;
	}

	val = nr64_mac(reg);
	if (on)
		val |= bit;
	else
		val &= ~bit;
	nw64_mac(reg, val);
}

static int niu_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct niu *np = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		np->orig_led_state = niu_led_state_save(np);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		niu_force_led(np, 1);
		break;

	case ETHTOOL_ID_OFF:
		niu_force_led(np, 0);
		break;

	case ETHTOOL_ID_INACTIVE:
		niu_led_state_restore(np, np->orig_led_state);
	}

	return 0;
}
static const struct ethtool_ops niu_ethtool_ops = {
	.get_drvinfo		= niu_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= niu_get_msglevel,
	.set_msglevel		= niu_set_msglevel,
	.nway_reset		= niu_nway_reset,
	.get_eeprom_len		= niu_get_eeprom_len,
	.get_eeprom		= niu_get_eeprom,
	.get_strings		= niu_get_strings,
	.get_sset_count		= niu_get_sset_count,
	.get_ethtool_stats	= niu_get_ethtool_stats,
	.set_phys_id		= niu_set_phys_id,
	.get_rxnfc		= niu_get_nfc,
	.set_rxnfc		= niu_set_nfc,
	.get_link_ksettings	= niu_get_link_ksettings,
	.set_link_ksettings	= niu_set_link_ksettings,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
			      int ldg, int ldn)
{
	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
		return -EINVAL;
	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	parent->ldg_map[ldn] = ldg;

	if (np->parent->plat_type == PLAT_TYPE_NIU) {
		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
		 * the firmware, and we're not supposed to change them.
		 * Validate the mapping, because if it's wrong we probably
		 * won't get any interrupts and that's painful to debug.
		 */
		if (nr64(LDG_NUM(ldn)) != ldg) {
			dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
				np->port, ldn, ldg,
				(unsigned long long) nr64(LDG_NUM(ldn)));
			return -EINVAL;
		}
	} else
		nw64(LDG_NUM(ldn), ldg);

	return 0;
}

static int niu_set_ldg_timer_res(struct niu *np, int res)
{
	if (res < 0 || res > LDG_TIMER_RES_VAL)
		return -EINVAL;

	nw64(LDG_TIMER_RES, res);

	return 0;
}

static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
{
	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
	    (func < 0 || func > 3) ||
	    (vector < 0 || vector > 0x1f))
		return -EINVAL;

	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);

	return 0;
}
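/* EEPROM access goes through the ESPC_PIO_STAT register: the address
 * is written together with READ_START, and the READ_END bit is then
 * polled.  Note the read frame is issued and polled twice before the
 * data byte is sampled; this appears to be a deliberate workaround
 * for the hardware rather than an accident.
 */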
static int niu_pci_eeprom_read(struct niu *np, u32 addr)
{
	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
	int limit;

	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
		return -EINVAL;

	frame = frame_base;
	nw64(ESPC_PIO_STAT, frame);
	limit = 64;
	do {
		udelay(5);
		frame = nr64(ESPC_PIO_STAT);
		if (frame & ESPC_PIO_STAT_READ_END)
			break;
	} while (limit--);
	if (!(frame & ESPC_PIO_STAT_READ_END)) {
		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
			(unsigned long long) frame);
		return -EINVAL;
	}

	frame = frame_base;
	nw64(ESPC_PIO_STAT, frame);
	limit = 64;
	do {
		udelay(5);
		frame = nr64(ESPC_PIO_STAT);
		if (frame & ESPC_PIO_STAT_READ_END)
			break;
	} while (limit--);
	if (!(frame & ESPC_PIO_STAT_READ_END)) {
		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
			(unsigned long long) frame);
		return -EINVAL;
	}

	frame = nr64(ESPC_PIO_STAT);
	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
}

static int niu_pci_eeprom_read16(struct niu *np, u32 off)
{
	int err = niu_pci_eeprom_read(np, off);
	u16 val;

	if (err < 0)
		return err;
	val = (err << 8);
	err = niu_pci_eeprom_read(np, off + 1);
	if (err < 0)
		return err;
	val |= (err & 0xff);

	return val;
}

static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
{
	int err = niu_pci_eeprom_read(np, off);
	u16 val;

	if (err < 0)
		return err;

	val = (err & 0xff);
	err = niu_pci_eeprom_read(np, off + 1);
	if (err < 0)
		return err;

	val |= (err & 0xff) << 8;

	return val;
}
static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
				    int namebuf_len)
{
	int i;

	for (i = 0; i < namebuf_len; i++) {
		int err = niu_pci_eeprom_read(np, off + i);
		if (err < 0)
			return err;
		*namebuf++ = err;
		if (!err)
			break;
	}
	if (i >= namebuf_len)
		return -EINVAL;

	return i + 1;
}
static void niu_vpd_parse_version(struct niu *np)
{
	struct niu_vpd *vpd = &np->vpd;
	int len = strlen(vpd->version) + 1;
	const char *s = vpd->version;
	int i;

	for (i = 0; i < len - 5; i++) {
		if (!strncmp(s + i, "FCode ", 6))
			break;
	}
	if (i >= len - 5)
		return;

	s += i + 5;
	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
		     vpd->fcode_major, vpd->fcode_minor);
	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
		np->flags |= NIU_FLAGS_VPD_VALID;
}
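/* VPD properties are stored as variable-length records: the byte at
 * offset 2 of each record header gives the record length, then
 * within the record the property data length sits at offset 4, a
 * NUL-terminated property name starts at offset 5 and the property
 * data follows the name immediately.
 */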
/* ESPC_PIO_EN_ENABLE must be set */
static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
{
	unsigned int found_mask = 0;
#define FOUND_MASK_MODEL	0x00000001
#define FOUND_MASK_BMODEL	0x00000002
#define FOUND_MASK_VERS		0x00000004
#define FOUND_MASK_MAC		0x00000008
#define FOUND_MASK_NMAC		0x00000010
#define FOUND_MASK_PHY		0x00000020
#define FOUND_MASK_ALL		0x0000003f

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
	while (start < end) {
		int len, err, prop_len;
		char namebuf[64];
		u8 *prop_buf;
		int max_len;

		if (found_mask == FOUND_MASK_ALL) {
			niu_vpd_parse_version(np);
			return 1;
		}

		err = niu_pci_eeprom_read(np, start + 2);
		if (err < 0)
			return err;
		len = err;
		start += 3;

		prop_len = niu_pci_eeprom_read(np, start + 4);
		if (prop_len < 0)
			return prop_len;
		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
		if (err < 0)
			return err;

		prop_buf = NULL;
		max_len = 0;
		if (!strcmp(namebuf, "model")) {
			prop_buf = np->vpd.model;
			max_len = NIU_VPD_MODEL_MAX;
			found_mask |= FOUND_MASK_MODEL;
		} else if (!strcmp(namebuf, "board-model")) {
			prop_buf = np->vpd.board_model;
			max_len = NIU_VPD_BD_MODEL_MAX;
			found_mask |= FOUND_MASK_BMODEL;
		} else if (!strcmp(namebuf, "version")) {
			prop_buf = np->vpd.version;
			max_len = NIU_VPD_VERSION_MAX;
			found_mask |= FOUND_MASK_VERS;
		} else if (!strcmp(namebuf, "local-mac-address")) {
			prop_buf = np->vpd.local_mac;
			max_len = ETH_ALEN;
			found_mask |= FOUND_MASK_MAC;
		} else if (!strcmp(namebuf, "num-mac-addresses")) {
			prop_buf = &np->vpd.mac_num;
			max_len = 1;
			found_mask |= FOUND_MASK_NMAC;
		} else if (!strcmp(namebuf, "phy-type")) {
			prop_buf = np->vpd.phy_type;
			max_len = NIU_VPD_PHY_TYPE_MAX;
			found_mask |= FOUND_MASK_PHY;
		}

		if (max_len && prop_len > max_len) {
			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
			return -EINVAL;
		}

		if (prop_buf) {
			u32 off = start + 5 + err;
			int i;

			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
				     namebuf, prop_len);
			for (i = 0; i < prop_len; i++)
				*prop_buf++ = niu_pci_eeprom_read(np, off + i);
		}

		start += len;
	}

	return 0;
}
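/* The property layout the scanner above assumes: each record carries a
 * record-length byte at offset 2; the scanner then steps 3 bytes in,
 * reads the property-value length at offset 4 from there, a
 * NUL-terminated name from offset 5, and the value immediately after
 * the name.  start advances by the record length to reach the next
 * record.
 */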
/* ESPC_PIO_EN_ENABLE must be set */
static void niu_pci_vpd_fetch(struct niu *np, u32 start)
{
	u32 offset;
	int err;

	err = niu_pci_eeprom_read16_swp(np, start + 1);
	if (err < 0)
		return;

	offset = err + 3;

	while (start + offset < ESPC_EEPROM_SIZE) {
		u32 here = start + offset;
		u32 end;

		err = niu_pci_eeprom_read(np, here);
		if (err != 0x90)
			return;

		err = niu_pci_eeprom_read16_swp(np, here + 1);
		if (err < 0)
			return;

		here = start + offset + 3;
		end = start + offset + err;

		offset += err;
		err = niu_pci_vpd_scan_props(np, here, end);
		if (err < 0 || err == 1)
			return;
	}
}
/* ESPC_PIO_EN_ENABLE must be set */
static u32 niu_pci_vpd_offset(struct niu *np)
{
	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
	int err;

	while (start < end) {
		ret = start;

		/* ROM header signature?  */
		err = niu_pci_eeprom_read16(np, start +  0);
		if (err != 0x55aa)
			return 0;

		/* Apply offset to PCI data structure.  */
		err = niu_pci_eeprom_read16(np, start + 23);
		if (err < 0)
			return 0;
		start += err;

		/* Check for "PCIR" signature.  */
		err = niu_pci_eeprom_read16(np, start +  0);
		if (err != 0x5043)
			return 0;
		err = niu_pci_eeprom_read16(np, start +  2);
		if (err != 0x4952)
			return 0;

		/* Check for OBP image type.  */
		err = niu_pci_eeprom_read(np, start + 20);
		if (err < 0)
			return 0;
		if (err != 0x01) {
			err = niu_pci_eeprom_read(np, ret + 2);
			if (err < 0)
				return 0;

			start = ret + (err * 512);
			continue;
		}

		err = niu_pci_eeprom_read16_swp(np, start + 8);
		if (err < 0)
			return 0;
		ret += err;

		err = niu_pci_eeprom_read(np, ret + 0);
		if (err != 0x82)
			return 0;

		return ret;
	}

	return 0;
}
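/* This walk follows the standard PCI expansion-ROM chain: each image
 * starts with the 0x55AA ROM signature and points to a "PCIR" data
 * structure whose code-type byte identifies the image.  Only the Open
 * Firmware image (code type 0x01) carries the VPD; other image types
 * are skipped in 512-byte units.  The final 0x82 check matches the
 * large-resource "identifier string" tag that introduces VPD data.
 */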
static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
{
	if (!strcmp(phy_prop, "mif")) {
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
	} else if (!strcmp(phy_prop, "xgf")) {
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "pcs")) {
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
	} else if (!strcmp(phy_prop, "xgc")) {
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
		/* 10G Serdes or 1G Serdes, default to 10G */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else {
		return -EINVAL;
	}
	return 0;
}
static int niu_pci_vpd_get_nports(struct niu *np)
{
	int ports = 0;

	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
		ports = 4;
	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
		ports = 2;
	}

	return ports;
}
static void niu_pci_vpd_validate(struct niu *np)
{
	struct net_device *dev = np->dev;
	struct niu_vpd *vpd = &np->vpd;
	u8 val8;

	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");

		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_PCS;
		if (np->port > 1) {
			np->flags |= NIU_FLAGS_FIBER;
			np->flags &= ~NIU_FLAGS_10G;
		}
		if (np->flags & NIU_FLAGS_10G)
			np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, "Illegal phy string [%s]\n",
			np->vpd.phy_type);
		dev_err(np->device, "Falling back to SPROM\n");
		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);

	val8 = dev->dev_addr[5];
	dev->dev_addr[5] += np->port;
	if (dev->dev_addr[5] < val8)
		dev->dev_addr[4]++;
}
static int niu_pci_probe_sprom(struct niu *np)
{
	struct net_device *dev = np->dev;
	int len, i;
	u64 val, sum;
	u8 val8;

	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
	len = val / 4;

	np->eeprom_len = len;

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Image size %llu\n", (unsigned long long)val);

	sum = 0;
	for (i = 0; i < len; i++) {
		val = nr64(ESPC_NCR(i));
		sum += (val >>  0) & 0xff;
		sum += (val >>  8) & 0xff;
		sum += (val >> 16) & 0xff;
		sum += (val >> 24) & 0xff;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
	if ((sum & 0xff) != 0xab) {
		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
		return -EINVAL;
	}

	val = nr64(ESPC_PHY_TYPE);
	switch (np->port) {
	case 0:
		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
			ESPC_PHY_TYPE_PORT0_SHIFT;
		break;
	case 1:
		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
			ESPC_PHY_TYPE_PORT1_SHIFT;
		break;
	case 2:
		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
			ESPC_PHY_TYPE_PORT2_SHIFT;
		break;
	case 3:
		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
			ESPC_PHY_TYPE_PORT3_SHIFT;
		break;
	default:
		dev_err(np->device, "Bogus port number %u\n",
			np->port);
		return -EINVAL;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: PHY type %x\n", val8);

	switch (val8) {
	case ESPC_PHY_TYPE_1G_COPPER:
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
		break;

	case ESPC_PHY_TYPE_1G_FIBER:
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
		break;

	case ESPC_PHY_TYPE_10G_COPPER:
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	case ESPC_PHY_TYPE_10G_FIBER:
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	default:
		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
		return -EINVAL;
	}

	val = nr64(ESPC_MAC_ADDR0);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
	dev->dev_addr[0] = (val >>  0) & 0xff;
	dev->dev_addr[1] = (val >>  8) & 0xff;
	dev->dev_addr[2] = (val >> 16) & 0xff;
	dev->dev_addr[3] = (val >> 24) & 0xff;

	val = nr64(ESPC_MAC_ADDR1);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
	dev->dev_addr[4] = (val >>  0) & 0xff;
	dev->dev_addr[5] = (val >>  8) & 0xff;

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
			dev->dev_addr);
		return -EINVAL;
	}

	val8 = dev->dev_addr[5];
	dev->dev_addr[5] += np->port;
	if (dev->dev_addr[5] < val8)
		dev->dev_addr[4]++;

	val = nr64(ESPC_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
	if (val >= 8 * 4)
		return -EINVAL;

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));

		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.model[val] = '\0';

	val = nr64(ESPC_BD_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
	if (val >= 4 * 4)
		return -EINVAL;

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));

		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.board_model[val] = '\0';

	np->vpd.mac_num =
		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);

	return 0;
}
static int niu_get_and_validate_port(struct niu *np)
{
	struct niu_parent *parent = np->parent;

	if (np->port <= 1)
		np->flags |= NIU_FLAGS_XMAC;

	if (!parent->num_ports) {
		if (parent->plat_type == PLAT_TYPE_NIU) {
			parent->num_ports = 2;
		} else {
			parent->num_ports = niu_pci_vpd_get_nports(np);
			if (!parent->num_ports) {
				/* Fall back to SPROM as last resort.
				 * This will fail on most cards.
				 */
				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
					ESPC_NUM_PORTS_MACS_VAL;

				/* All of the current probing methods fail on
				 * Maramba on-board parts.
				 */
				if (!parent->num_ports)
					parent->num_ports = 4;
			}
		}
	}

	if (np->port >= parent->num_ports)
		return -ENODEV;

	return 0;
}
static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
		      int dev_id_1, int dev_id_2, u8 phy_port, int type)
{
	u32 id = (dev_id_1 << 16) | dev_id_2;
	u8 idx;

	if (dev_id_1 < 0 || dev_id_2 < 0)
		return 0;
	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
		 * test covers the 8706 as well.
		 */
		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
			return 0;
	} else {
		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
			return 0;
	}

	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
		parent->index, id,
		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
		type == PHY_TYPE_PCS ? "PCS" : "MII",
		phy_port);

	if (p->cur[type] >= NIU_MAX_PORTS) {
		pr_err("Too many PHY ports\n");
		return -EINVAL;
	}
	idx = p->cur[type];
	p->phy_id[type][idx] = id;
	p->phy_port[type][idx] = phy_port;
	p->cur[type] = idx + 1;

	return 0;
}
static int port_has_10g(struct phy_probe_info *p, int port)
{
	int i;

	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
			return 1;
	}
	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
		if (p->phy_port[PHY_TYPE_PCS][i] == port)
			return 1;
	}

	return 0;
}

static int count_10g_ports(struct phy_probe_info *p, int *lowest)
{
	int port, cnt;

	cnt = 0;
	*lowest = 32;
	for (port = 8; port < 32; port++) {
		if (port_has_10g(p, port)) {
			if (!cnt)
				*lowest = port;
			cnt++;
		}
	}

	return cnt;
}

static int count_1g_ports(struct phy_probe_info *p, int *lowest)
{
	*lowest = 32;
	if (p->cur[PHY_TYPE_MII])
		*lowest = p->phy_port[PHY_TYPE_MII][0];

	return p->cur[PHY_TYPE_MII];
}
static void niu_n2_divide_channels(struct niu_parent *parent)
{
	int num_ports = parent->num_ports;
	int i;

	for (i = 0; i < num_ports; i++) {
		parent->rxchan_per_port[i] = (16 / num_ports);
		parent->txchan_per_port[i] = (16 / num_ports);

		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
	}
}
static void niu_divide_channels(struct niu_parent *parent,
				int num_10g, int num_1g)
{
	int num_ports = parent->num_ports;
	int rx_chans_per_10g, rx_chans_per_1g;
	int tx_chans_per_10g, tx_chans_per_1g;
	int i, tot_rx, tot_tx;

	if (!num_10g || !num_1g) {
		rx_chans_per_10g = rx_chans_per_1g =
			(NIU_NUM_RXCHAN / num_ports);
		tx_chans_per_10g = tx_chans_per_1g =
			(NIU_NUM_TXCHAN / num_ports);
	} else {
		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
		rx_chans_per_10g = (NIU_NUM_RXCHAN -
				    (rx_chans_per_1g * num_1g)) /
			num_10g;

		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
		tx_chans_per_10g = (NIU_NUM_TXCHAN -
				    (tx_chans_per_1g * num_1g)) /
			num_10g;
	}

	tot_rx = tot_tx = 0;
	for (i = 0; i < num_ports; i++) {
		int type = phy_decode(parent->port_phy, i);

		if (type == PORT_TYPE_10G) {
			parent->rxchan_per_port[i] = rx_chans_per_10g;
			parent->txchan_per_port[i] = tx_chans_per_10g;
		} else {
			parent->rxchan_per_port[i] = rx_chans_per_1g;
			parent->txchan_per_port[i] = tx_chans_per_1g;
		}
		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
		tot_rx += parent->rxchan_per_port[i];
		tot_tx += parent->txchan_per_port[i];
	}

	if (tot_rx > NIU_NUM_RXCHAN) {
		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
		       parent->index, tot_rx);
		for (i = 0; i < num_ports; i++)
			parent->rxchan_per_port[i] = 1;
	}
	if (tot_tx > NIU_NUM_TXCHAN) {
		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
		       parent->index, tot_tx);
		for (i = 0; i < num_ports; i++)
			parent->txchan_per_port[i] = 1;
	}
	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
		pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
			parent->index, tot_rx, tot_tx);
	}
}
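/* Worked example, assuming NIU_NUM_RXCHAN == 16 and NIU_NUM_TXCHAN == 24:
 * with num_10g == 2 and num_1g == 2, each 1G port gets 16/8 == 2 RX
 * channels and each 10G port (16 - 2*2) / 2 == 6, totalling 16; on the
 * TX side each 1G port gets 24/6 == 4 and each 10G port
 * (24 - 2*4) / 2 == 8, totalling 24.  Mixed configurations therefore
 * consume every channel, and the over/under checks in
 * niu_divide_channels() should only fire on a driver bug.
 */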
static void niu_divide_rdc_groups(struct niu_parent *parent,
				  int num_10g, int num_1g)
{
	int i, num_ports = parent->num_ports;
	int rdc_group, rdc_groups_per_port;
	int rdc_channel_base;

	rdc_group = 0;
	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;

	rdc_channel_base = 0;

	for (i = 0; i < num_ports; i++) {
		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
		int grp, num_channels = parent->rxchan_per_port[i];
		int this_channel_offset;

		tp->first_table_num = rdc_group;
		tp->num_tables = rdc_groups_per_port;
		this_channel_offset = 0;
		for (grp = 0; grp < tp->num_tables; grp++) {
			struct rdc_table *rt = &tp->tables[grp];
			int slot;

			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
				parent->index, i, tp->first_table_num + grp);
			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
				rt->rxdma_channel[slot] =
					rdc_channel_base + this_channel_offset;

				pr_cont("%d ", rt->rxdma_channel[slot]);

				if (++this_channel_offset == num_channels)
					this_channel_offset = 0;
			}
			pr_cont("]\n");
		}

		parent->rdc_default[i] = rdc_channel_base;

		rdc_channel_base += num_channels;
		rdc_group += rdc_groups_per_port;
	}
}
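/* Example of the slot fill above: a port whose rxchan_per_port is 2 and
 * whose rdc_channel_base is 4 gets every RDC table slot populated
 * round-robin as 4, 5, 4, 5, ..., so hashed RX traffic spreads evenly
 * across that port's two DMA channels.
 */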
static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
			       struct phy_probe_info *info)
{
	unsigned long flags;
	int port, err;

	memset(info, 0, sizeof(*info));

	/* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */
	niu_lock_parent(np, flags);
	err = 0;
	for (port = 8; port < 32; port++) {
		int dev_id_1, dev_id_2;

		dev_id_1 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PMA_PMD);
		if (err)
			break;
		dev_id_1 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PCS);
		if (err)
			break;
		dev_id_1 = mii_read(np, port, MII_PHYSID1);
		dev_id_2 = mii_read(np, port, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_MII);
		if (err)
			break;
	}
	niu_unlock_parent(np, flags);

	return err;
}
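/* The PHY identifier probed above is the standard 32-bit MII value split
 * across PHYSID1 (upper 16 bits) and PHYSID2 (lower 16 bits);
 * phy_record() masks it with NIU_PHY_ID_MASK so that silicon revision
 * bits do not affect the BCM8704/BCM5464R/MRVL88X2011 matching.
 */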
static int walk_phys(struct niu *np, struct niu_parent *parent)
{
	struct phy_probe_info *info = &parent->phy_probe_info;
	int lowest_10g, lowest_1g;
	int num_10g, num_1g;
	u32 val;
	int err;

	num_10g = num_1g = 0;

	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		num_10g = 0;
		num_1g = 2;
		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
		parent->num_ports = 4;
		val = (phy_encode(PORT_TYPE_1G, 0) |
		       phy_encode(PORT_TYPE_1G, 1) |
		       phy_encode(PORT_TYPE_1G, 2) |
		       phy_encode(PORT_TYPE_1G, 3));
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		num_10g = 2;
		num_1g = 0;
		parent->num_ports = 2;
		val = (phy_encode(PORT_TYPE_10G, 0) |
		       phy_encode(PORT_TYPE_10G, 1));
	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
		   (parent->plat_type == PLAT_TYPE_NIU)) {
		/* this is the Monza case */
		if (np->flags & NIU_FLAGS_10G) {
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
		} else {
			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1));
		}
	} else {
		err = fill_phy_probe_info(np, parent, info);
		if (err)
			return err;

		num_10g = count_10g_ports(info, &lowest_10g);
		num_1g = count_1g_ports(info, &lowest_1g);

		/* The selector packs both counts into one byte, e.g.
		 * 0x24 means two 10G and four 1G PHYs were found.
		 */
		switch ((num_10g << 4) | num_1g) {
		case 0x24:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			/* fallthru */
		case 0x22:
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
			break;

		case 0x20:
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
			break;

		case 0x10:
			val = phy_encode(PORT_TYPE_10G, np->port);
			break;

		case 0x14:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			/* fallthru */
		case 0x13:
			if ((lowest_10g & 0x7) == 0)
				val = (phy_encode(PORT_TYPE_10G, 0) |
				       phy_encode(PORT_TYPE_1G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
			else
				val = (phy_encode(PORT_TYPE_1G, 0) |
				       phy_encode(PORT_TYPE_10G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
			break;

		case 0x04:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
			break;

		default:
			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
			       num_10g, num_1g);
			return -EINVAL;
		}
	}

	parent->port_phy = val;

	if (parent->plat_type == PLAT_TYPE_NIU)
		niu_n2_divide_channels(parent);
	else
		niu_divide_channels(parent, num_10g, num_1g);

	niu_divide_rdc_groups(parent, num_10g, num_1g);

	return 0;

unknown_vg_1g_port:
	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
	return -EINVAL;
}
static int niu_probe_ports(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int err, i;

	if (parent->port_phy == PORT_PHY_UNKNOWN) {
		err = walk_phys(np, parent);
		if (err)
			return err;

		niu_set_ldg_timer_res(np, 2);
		for (i = 0; i <= LDN_MAX; i++)
			niu_ldn_irq_enable(np, i, 0);
	}

	if (parent->port_phy == PORT_PHY_INVALID)
		return -EINVAL;

	return 0;
}
static int niu_classifier_swstate_init(struct niu *np)
{
	struct niu_classifier *cp = &np->clas;

	cp->tcam_top = (u16) np->port;
	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
	cp->h1_init = 0xffffffff;
	cp->h2_init = 0xffff;

	return fflp_early_init(np);
}
static void niu_link_config_init(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;

	lp->advertising = (ADVERTISED_10baseT_Half |
			   ADVERTISED_10baseT_Full |
			   ADVERTISED_100baseT_Half |
			   ADVERTISED_100baseT_Full |
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full |
			   ADVERTISED_10000baseT_Full |
			   ADVERTISED_Autoneg);
	lp->speed = lp->active_speed = SPEED_INVALID;
	lp->duplex = DUPLEX_FULL;
	lp->active_duplex = DUPLEX_INVALID;
	lp->autoneg = 1;
#if 0
	lp->loopback_mode = LOOPBACK_MAC;
	lp->active_speed = SPEED_10000;
	lp->active_duplex = DUPLEX_FULL;
#else
	lp->loopback_mode = LOOPBACK_DISABLED;
#endif
}
*np
)
8987 np
->mac_regs
= np
->regs
+ XMAC_PORT0_OFF
;
8988 np
->ipp_off
= 0x00000;
8989 np
->pcs_off
= 0x04000;
8990 np
->xpcs_off
= 0x02000;
8994 np
->mac_regs
= np
->regs
+ XMAC_PORT1_OFF
;
8995 np
->ipp_off
= 0x08000;
8996 np
->pcs_off
= 0x0a000;
8997 np
->xpcs_off
= 0x08000;
9001 np
->mac_regs
= np
->regs
+ BMAC_PORT2_OFF
;
9002 np
->ipp_off
= 0x04000;
9003 np
->pcs_off
= 0x0e000;
9004 np
->xpcs_off
= ~0UL;
9008 np
->mac_regs
= np
->regs
+ BMAC_PORT3_OFF
;
9009 np
->ipp_off
= 0x0c000;
9010 np
->pcs_off
= 0x12000;
9011 np
->xpcs_off
= ~0UL;
9015 dev_err(np
->device
, "Port %u is invalid, cannot compute MAC block offset\n", np
->port
);
static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
{
	struct msix_entry msi_vec[NIU_NUM_LDG];
	struct niu_parent *parent = np->parent;
	struct pci_dev *pdev = np->pdev;
	int i, num_irqs;
	u8 first_ldg;

	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
		ldg_num_map[i] = first_ldg + i;

	num_irqs = (parent->rxchan_per_port[np->port] +
		    parent->txchan_per_port[np->port] +
		    (np->port == 0 ? 3 : 1));
	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));

	for (i = 0; i < num_irqs; i++) {
		msi_vec[i].vector = 0;
		msi_vec[i].entry = i;
	}

	num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
	if (num_irqs < 0) {
		np->flags &= ~NIU_FLAGS_MSIX;
		return;
	}

	np->flags |= NIU_FLAGS_MSIX;
	for (i = 0; i < num_irqs; i++)
		np->ldg[i].irq = msi_vec[i].vector;
	np->num_ldg = num_irqs;
}
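/* pci_enable_msix_range() returns the number of vectors actually
 * allocated (somewhere between the requested minimum of 1 and num_irqs)
 * or a negative errno; on failure the code above simply clears
 * NIU_FLAGS_MSIX and the device falls back to its legacy INTx interrupt.
 */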
static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
{
#ifdef CONFIG_SPARC64
	struct platform_device *op = np->op;
	const u32 *int_prop;
	int i;

	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
	if (!int_prop)
		return -ENODEV;

	for (i = 0; i < op->archdata.num_irqs; i++) {
		ldg_num_map[i] = int_prop[i];
		np->ldg[i].irq = op->archdata.irqs[i];
	}

	np->num_ldg = op->archdata.num_irqs;

	return 0;
#else
	return -EINVAL;
#endif
}
static int niu_ldg_init(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	u8 ldg_num_map[NIU_NUM_LDG];
	int first_chan, num_chan;
	int i, err, ldg_rotor;
	u8 port;

	np->num_ldg = 1;
	np->ldg[0].irq = np->dev->irq;
	if (parent->plat_type == PLAT_TYPE_NIU) {
		err = niu_n2_irq_init(np, ldg_num_map);
		if (err)
			return err;
	} else
		niu_try_msix(np, ldg_num_map);

	port = np->port;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);

		lp->np = np;
		lp->ldg_num = ldg_num_map[i];
		lp->timer = 2; /* XXX */

		/* On N2 NIU the firmware has setup the SID mappings so they go
		 * to the correct values that will route the LDG to the proper
		 * interrupt in the NCU interrupt table.
		 */
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
			if (err)
				return err;
		}
	}

	/* We adopt the LDG assignment ordering used by the N2 NIU
	 * 'interrupt' properties because that simplifies a lot of
	 * things.  This ordering is:
	 *
	 *	MAC
	 *	MIF	(if port zero)
	 *	SYSERR	(if port zero)
	 *	RX channels
	 *	TX channels
	 */

	ldg_rotor = 0;

	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
				 LDN_MAC(port));
	if (err)
		return err;

	ldg_rotor++;
	if (ldg_rotor == np->num_ldg)
		ldg_rotor = 0;

	if (port == 0) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_MIF);
		if (err)
			return err;

		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;

		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_DEVICE_ERROR);
		if (err)
			return err;

		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	first_chan = 0;
	for (i = 0; i < port; i++)
		first_chan += parent->rxchan_per_port[i];
	num_chan = parent->rxchan_per_port[port];

	for (i = first_chan; i < (first_chan + num_chan); i++) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_RXDMA(i));
		if (err)
			return err;
		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	first_chan = 0;
	for (i = 0; i < port; i++)
		first_chan += parent->txchan_per_port[i];
	num_chan = parent->txchan_per_port[port];
	for (i = first_chan; i < (first_chan + num_chan); i++) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_TXDMA(i));
		if (err)
			return err;
		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	return 0;
}
*np
)
9199 if (np
->flags
& NIU_FLAGS_MSIX
)
9200 pci_disable_msix(np
->pdev
);
static int niu_get_of_props(struct niu *np)
{
#ifdef CONFIG_SPARC64
	struct net_device *dev = np->dev;
	struct device_node *dp;
	const char *phy_type;
	const u8 *mac_addr;
	const char *model;
	int prop_len;

	if (np->parent->plat_type == PLAT_TYPE_NIU)
		dp = np->op->dev.of_node;
	else
		dp = pci_device_to_OF_node(np->pdev);

	phy_type = of_get_property(dp, "phy-type", &prop_len);
	if (!phy_type) {
		netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp);
		return -EINVAL;
	}

	if (!strcmp(phy_type, "none"))
		return -ENODEV;

	strcpy(np->vpd.phy_type, phy_type);

	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		netdev_err(dev, "%pOF: Illegal phy string [%s]\n",
			   dp, np->vpd.phy_type);
		return -EINVAL;
	}

	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
	if (!mac_addr) {
		netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n",
			   dp);
		return -EINVAL;
	}
	if (prop_len != dev->addr_len) {
		netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n",
			   dp, prop_len);
	}
	memcpy(dev->dev_addr, mac_addr, dev->addr_len);
	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
		netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp);
		netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr);
		return -EINVAL;
	}

	model = of_get_property(dp, "model", &prop_len);

	if (model)
		strcpy(np->vpd.model, model);

	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	}

	return 0;
#else
	return -EINVAL;
#endif
}
static int niu_get_invariants(struct niu *np)
{
	int err, have_props;
	u32 offset;

	err = niu_get_of_props(np);
	if (err == -ENODEV)
		return err;

	have_props = !err;

	err = niu_init_mac_ipp_pcs_base(np);
	if (err)
		return err;

	if (have_props) {
		err = niu_get_and_validate_port(np);
		if (err)
			return err;
	} else {
		if (np->parent->plat_type == PLAT_TYPE_NIU)
			return -EINVAL;

		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
		offset = niu_pci_vpd_offset(np);
		netif_printk(np, probe, KERN_DEBUG, np->dev,
			     "%s() VPD offset [%08x]\n", __func__, offset);
		if (offset)
			niu_pci_vpd_fetch(np, offset);
		nw64(ESPC_PIO_EN, 0);

		if (np->flags & NIU_FLAGS_VPD_VALID) {
			niu_pci_vpd_validate(np);
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
		}

		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
			err = niu_pci_probe_sprom(np);
			if (err)
				return err;
		}
	}

	err = niu_probe_ports(np);
	if (err)
		return err;

	niu_ldg_init(np);

	niu_classifier_swstate_init(np);
	niu_link_config_init(np);

	err = niu_determine_phy_disposition(np);
	if (!err)
		err = niu_init_link(np);

	return err;
}
);
9334 static DEFINE_MUTEX(niu_parent_lock
);
9335 static int niu_parent_index
;
9337 static ssize_t
show_port_phy(struct device
*dev
,
9338 struct device_attribute
*attr
, char *buf
)
9340 struct platform_device
*plat_dev
= to_platform_device(dev
);
9341 struct niu_parent
*p
= dev_get_platdata(&plat_dev
->dev
);
9342 u32 port_phy
= p
->port_phy
;
9343 char *orig_buf
= buf
;
9346 if (port_phy
== PORT_PHY_UNKNOWN
||
9347 port_phy
== PORT_PHY_INVALID
)
9350 for (i
= 0; i
< p
->num_ports
; i
++) {
9351 const char *type_str
;
9354 type
= phy_decode(port_phy
, i
);
9355 if (type
== PORT_TYPE_10G
)
9360 (i
== 0) ? "%s" : " %s",
9363 buf
+= sprintf(buf
, "\n");
9364 return buf
- orig_buf
;
static ssize_t show_plat_type(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
	const char *type_str;

	switch (p->plat_type) {
	case PLAT_TYPE_ATLAS:
		type_str = "atlas";
		break;
	case PLAT_TYPE_NIU:
		type_str = "niu";
		break;
	case PLAT_TYPE_VF_P0:
		type_str = "vf_p0";
		break;
	case PLAT_TYPE_VF_P1:
		type_str = "vf_p1";
		break;
	default:
		type_str = "unknown";
		break;
	}

	return sprintf(buf, "%s\n", type_str);
}
static ssize_t __show_chan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf,
				    int rx)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
	char *orig_buf = buf;
	u8 *arr;
	int i;

	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);

	for (i = 0; i < p->num_ports; i++) {
		buf += sprintf(buf,
			       (i == 0) ? "%d" : " %d",
			       arr[i]);
	}
	buf += sprintf(buf, "\n");

	return buf - orig_buf;
}
static ssize_t show_rxchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 1);
}

static ssize_t show_txchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 0);
}
show_num_ports(struct device
*dev
,
9430 struct device_attribute
*attr
, char *buf
)
9432 struct platform_device
*plat_dev
= to_platform_device(dev
);
9433 struct niu_parent
*p
= dev_get_platdata(&plat_dev
->dev
);
9435 return sprintf(buf
, "%d\n", p
->num_ports
);
static struct device_attribute niu_parent_attributes[] = {
	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
	{}
};
static struct niu_parent *niu_new_parent(struct niu *np,
					 union niu_parent_id *id, u8 ptype)
{
	struct platform_device *plat_dev;
	struct niu_parent *p;
	int i;

	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
						   NULL, 0);
	if (IS_ERR(plat_dev))
		return NULL;

	for (i = 0; niu_parent_attributes[i].attr.name; i++) {
		int err = device_create_file(&plat_dev->dev,
					     &niu_parent_attributes[i]);
		if (err)
			goto fail_unregister;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto fail_unregister;

	p->index = niu_parent_index++;

	plat_dev->dev.platform_data = p;
	p->plat_dev = plat_dev;

	memcpy(&p->id, id, sizeof(*id));
	p->plat_type = ptype;
	INIT_LIST_HEAD(&p->list);
	atomic_set(&p->refcnt, 0);
	list_add(&p->list, &niu_parent_list);
	spin_lock_init(&p->lock);

	p->rxdma_clock_divider = 7500;

	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
	if (p->plat_type == PLAT_TYPE_NIU)
		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		p->tcam_key[index] = TCAM_KEY_TSEL;
		p->flow_key[index] = (FLOW_KEY_IPSA |
				      FLOW_KEY_IPDA |
				      FLOW_KEY_PROTO |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_0_SHIFT) |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_1_SHIFT));
	}

	for (i = 0; i < LDN_MAX + 1; i++)
		p->ldg_map[i] = LDG_INVALID;

	return p;

fail_unregister:
	platform_device_unregister(plat_dev);
	return NULL;
}
static struct niu_parent *niu_get_parent(struct niu *np,
					 union niu_parent_id *id, u8 ptype)
{
	struct niu_parent *p, *tmp;
	int port = np->port;

	mutex_lock(&niu_parent_lock);
	p = NULL;
	list_for_each_entry(tmp, &niu_parent_list, list) {
		if (!memcmp(id, &tmp->id, sizeof(*id))) {
			p = tmp;
			break;
		}
	}
	if (!p)
		p = niu_new_parent(np, id, ptype);

	if (p) {
		char port_name[8];
		int err;

		sprintf(port_name, "port%d", port);
		err = sysfs_create_link(&p->plat_dev->dev.kobj,
					&np->device->kobj,
					port_name);
		if (!err) {
			p->ports[port] = np;
			atomic_inc(&p->refcnt);
		}
	}
	mutex_unlock(&niu_parent_lock);

	return p;
}
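/* Ports that share one physical NIU (the same PCI domain/bus/device, or
 * the same OF parent node) resolve to a single niu_parent, found by the
 * id memcmp() above; the refcount taken here drops in niu_put_parent(),
 * which unregisters the "niu-board" platform device with the last port.
 */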
static void niu_put_parent(struct niu *np)
{
	struct niu_parent *p = np->parent;
	u8 port = np->port;
	char port_name[8];

	BUG_ON(!p || p->ports[port] != np);

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "%s() port[%u]\n", __func__, port);

	sprintf(port_name, "port%d", port);

	mutex_lock(&niu_parent_lock);

	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);

	p->ports[port] = NULL;
	np->parent = NULL;

	if (atomic_dec_and_test(&p->refcnt)) {
		list_del(&p->list);
		platform_device_unregister(p->plat_dev);
	}

	mutex_unlock(&niu_parent_lock);
}
static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
				    u64 *handle, gfp_t flag)
{
	dma_addr_t dh;
	void *ret;

	ret = dma_alloc_coherent(dev, size, &dh, flag);
	if (ret)
		*handle = dh;
	return ret;
}

static void niu_pci_free_coherent(struct device *dev, size_t size,
				  void *cpu_addr, u64 handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}

static u64 niu_pci_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	return dma_map_page(dev, page, offset, size, direction);
}

static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
			       size_t size, enum dma_data_direction direction)
{
	dma_unmap_page(dev, dma_address, size, direction);
}

static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
			      size_t size,
			      enum dma_data_direction direction)
{
	return dma_map_single(dev, cpu_addr, size, direction);
}

static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
				 size_t size,
				 enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

static const struct niu_ops niu_pci_ops = {
	.alloc_coherent	= niu_pci_alloc_coherent,
	.free_coherent	= niu_pci_free_coherent,
	.map_page	= niu_pci_map_page,
	.unmap_page	= niu_pci_unmap_page,
	.map_single	= niu_pci_map_single,
	.unmap_single	= niu_pci_unmap_single,
};
static void niu_driver_version(void)
{
	static int niu_version_printed;

	if (niu_version_printed++ == 0)
		pr_info("%s", version);
}
*niu_alloc_and_init(struct device
*gen_dev
,
9637 struct pci_dev
*pdev
,
9638 struct platform_device
*op
,
9639 const struct niu_ops
*ops
, u8 port
)
9641 struct net_device
*dev
;
9644 dev
= alloc_etherdev_mq(sizeof(struct niu
), NIU_NUM_TXCHAN
);
9648 SET_NETDEV_DEV(dev
, gen_dev
);
9650 np
= netdev_priv(dev
);
9654 np
->device
= gen_dev
;
9657 np
->msg_enable
= niu_debug
;
9659 spin_lock_init(&np
->lock
);
9660 INIT_WORK(&np
->reset_task
, niu_reset_task
);
static const struct net_device_ops niu_netdev_ops = {
	.ndo_open		= niu_open,
	.ndo_stop		= niu_close,
	.ndo_start_xmit		= niu_start_xmit,
	.ndo_get_stats64	= niu_get_stats,
	.ndo_set_rx_mode	= niu_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= niu_set_mac_addr,
	.ndo_do_ioctl		= niu_ioctl,
	.ndo_tx_timeout		= niu_tx_timeout,
	.ndo_change_mtu		= niu_change_mtu,
};
static void niu_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &niu_netdev_ops;
	dev->ethtool_ops = &niu_ethtool_ops;
	dev->watchdog_timeo = NIU_TX_TIMEOUT;
}
static void niu_device_announce(struct niu *np)
{
	struct net_device *dev = np->dev;

	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);

	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	} else {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
			 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
			  "COPPER")),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	}
}
static void niu_set_basic_features(struct net_device *dev)
{
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
}
*pdev
,
9723 const struct pci_device_id
*ent
)
9725 union niu_parent_id parent_id
;
9726 struct net_device
*dev
;
9731 niu_driver_version();
9733 err
= pci_enable_device(pdev
);
9735 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting\n");
9739 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
) ||
9740 !(pci_resource_flags(pdev
, 2) & IORESOURCE_MEM
)) {
9741 dev_err(&pdev
->dev
, "Cannot find proper PCI device base addresses, aborting\n");
9743 goto err_out_disable_pdev
;
9746 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
9748 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting\n");
9749 goto err_out_disable_pdev
;
9752 if (!pci_is_pcie(pdev
)) {
9753 dev_err(&pdev
->dev
, "Cannot find PCI Express capability, aborting\n");
9755 goto err_out_free_res
;
9758 dev
= niu_alloc_and_init(&pdev
->dev
, pdev
, NULL
,
9759 &niu_pci_ops
, PCI_FUNC(pdev
->devfn
));
9762 goto err_out_free_res
;
9764 np
= netdev_priv(dev
);
9766 memset(&parent_id
, 0, sizeof(parent_id
));
9767 parent_id
.pci
.domain
= pci_domain_nr(pdev
->bus
);
9768 parent_id
.pci
.bus
= pdev
->bus
->number
;
9769 parent_id
.pci
.device
= PCI_SLOT(pdev
->devfn
);
9771 np
->parent
= niu_get_parent(np
, &parent_id
,
9775 goto err_out_free_dev
;
9778 pcie_capability_clear_and_set_word(pdev
, PCI_EXP_DEVCTL
,
9779 PCI_EXP_DEVCTL_NOSNOOP_EN
,
9780 PCI_EXP_DEVCTL_CERE
| PCI_EXP_DEVCTL_NFERE
|
9781 PCI_EXP_DEVCTL_FERE
| PCI_EXP_DEVCTL_URRE
|
9782 PCI_EXP_DEVCTL_RELAX_EN
);
9784 dma_mask
= DMA_BIT_MASK(44);
9785 err
= pci_set_dma_mask(pdev
, dma_mask
);
9787 dev
->features
|= NETIF_F_HIGHDMA
;
9788 err
= pci_set_consistent_dma_mask(pdev
, dma_mask
);
9790 dev_err(&pdev
->dev
, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
9791 goto err_out_release_parent
;
9795 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
9797 dev_err(&pdev
->dev
, "No usable DMA configuration, aborting\n");
9798 goto err_out_release_parent
;
9802 niu_set_basic_features(dev
);
9804 dev
->priv_flags
|= IFF_UNICAST_FLT
;
9806 np
->regs
= pci_ioremap_bar(pdev
, 0);
9808 dev_err(&pdev
->dev
, "Cannot map device registers, aborting\n");
9810 goto err_out_release_parent
;
9813 pci_set_master(pdev
);
9814 pci_save_state(pdev
);
9816 dev
->irq
= pdev
->irq
;
9818 /* MTU range: 68 - 9216 */
9819 dev
->min_mtu
= ETH_MIN_MTU
;
9820 dev
->max_mtu
= NIU_MAX_MTU
;
9822 niu_assign_netdev_ops(dev
);
9824 err
= niu_get_invariants(np
);
9827 dev_err(&pdev
->dev
, "Problem fetching invariants of chip, aborting\n");
9828 goto err_out_iounmap
;
9831 err
= register_netdev(dev
);
9833 dev_err(&pdev
->dev
, "Cannot register net device, aborting\n");
9834 goto err_out_iounmap
;
9837 pci_set_drvdata(pdev
, dev
);
9839 niu_device_announce(np
);
9849 err_out_release_parent
:
9856 pci_release_regions(pdev
);
9858 err_out_disable_pdev
:
9859 pci_disable_device(pdev
);
9864 static void niu_pci_remove_one(struct pci_dev
*pdev
)
9866 struct net_device
*dev
= pci_get_drvdata(pdev
);
9869 struct niu
*np
= netdev_priv(dev
);
9871 unregister_netdev(dev
);
9882 pci_release_regions(pdev
);
9883 pci_disable_device(pdev
);
9887 static int niu_suspend(struct pci_dev
*pdev
, pm_message_t state
)
9889 struct net_device
*dev
= pci_get_drvdata(pdev
);
9890 struct niu
*np
= netdev_priv(dev
);
9891 unsigned long flags
;
9893 if (!netif_running(dev
))
9896 flush_work(&np
->reset_task
);
9899 del_timer_sync(&np
->timer
);
9901 spin_lock_irqsave(&np
->lock
, flags
);
9902 niu_enable_interrupts(np
, 0);
9903 spin_unlock_irqrestore(&np
->lock
, flags
);
9905 netif_device_detach(dev
);
9907 spin_lock_irqsave(&np
->lock
, flags
);
9909 spin_unlock_irqrestore(&np
->lock
, flags
);
9911 pci_save_state(pdev
);
static int niu_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);

	netif_device_attach(dev);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}
static struct pci_driver niu_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= niu_pci_tbl,
	.probe		= niu_pci_init_one,
	.remove		= niu_pci_remove_one,
	.suspend	= niu_suspend,
	.resume		= niu_resume,
};
#ifdef CONFIG_SPARC64
static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
				     u64 *dma_addr, gfp_t flag)
{
	unsigned long order = get_order(size);
	unsigned long page = __get_free_pages(flag, order);

	if (page == 0UL)
		return NULL;
	memset((char *)page, 0, PAGE_SIZE << order);
	*dma_addr = __pa(page);

	return (void *) page;
}

static void niu_phys_free_coherent(struct device *dev, size_t size,
				   void *cpu_addr, u64 handle)
{
	unsigned long order = get_order(size);

	free_pages((unsigned long) cpu_addr, order);
}

static u64 niu_phys_map_page(struct device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	return page_to_phys(page) + offset;
}

static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
				size_t size, enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
			       size_t size,
			       enum dma_data_direction direction)
{
	return __pa(cpu_addr);
}

static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static const struct niu_ops niu_phys_ops = {
	.alloc_coherent	= niu_phys_alloc_coherent,
	.free_coherent	= niu_phys_free_coherent,
	.map_page	= niu_phys_map_page,
	.unmap_page	= niu_phys_unmap_page,
	.map_single	= niu_phys_map_single,
	.unmap_single	= niu_phys_unmap_single,
};
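/* On the Niagara-2 platform the NIU sees no IOMMU from the driver's
 * point of view, so these ops hand the device raw physical addresses:
 * map_page/map_single are simple __pa()-style translations and the
 * unmap hooks have nothing to undo.
 */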
static int niu_of_probe(struct platform_device *op)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	const u32 *reg;
	int err;

	niu_driver_version();

	reg = of_get_property(op->dev.of_node, "reg", NULL);
	if (!reg) {
		dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n",
			op->dev.of_node);
		return -ENODEV;
	}

	dev = niu_alloc_and_init(&op->dev, NULL, op,
				 &niu_phys_ops, reg[0] & 0x1);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.of = of_get_parent(op->dev.of_node);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_NIU);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	niu_set_basic_features(dev);

	np->regs = of_ioremap(&op->resource[1], 0,
			      resource_size(&op->resource[1]),
			      "niu regs");
	if (!np->regs) {
		dev_err(&op->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
				    resource_size(&op->resource[2]),
				    "niu vregs-1");
	if (!np->vir_regs_1) {
		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
				    resource_size(&op->resource[3]),
				    "niu vregs-2");
	if (!np->vir_regs_2) {
		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&op->dev, "Cannot register net device, aborting\n");
		goto err_out_iounmap;
	}

	platform_set_drvdata(op, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->vir_regs_1) {
		of_iounmap(&op->resource[2], np->vir_regs_1,
			   resource_size(&op->resource[2]));
		np->vir_regs_1 = NULL;
	}

	if (np->vir_regs_2) {
		of_iounmap(&op->resource[3], np->vir_regs_2,
			   resource_size(&op->resource[3]));
		np->vir_regs_2 = NULL;
	}

	if (np->regs) {
		of_iounmap(&op->resource[1], np->regs,
			   resource_size(&op->resource[1]));
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}
*op
)
10128 struct net_device
*dev
= platform_get_drvdata(op
);
10131 struct niu
*np
= netdev_priv(dev
);
10133 unregister_netdev(dev
);
10135 if (np
->vir_regs_1
) {
10136 of_iounmap(&op
->resource
[2], np
->vir_regs_1
,
10137 resource_size(&op
->resource
[2]));
10138 np
->vir_regs_1
= NULL
;
10141 if (np
->vir_regs_2
) {
10142 of_iounmap(&op
->resource
[3], np
->vir_regs_2
,
10143 resource_size(&op
->resource
[3]));
10144 np
->vir_regs_2
= NULL
;
10148 of_iounmap(&op
->resource
[1], np
->regs
,
10149 resource_size(&op
->resource
[1]));
10155 niu_put_parent(np
);
static const struct of_device_id niu_match[] = {
	{
		.name = "network",
		.compatible = "SUNW,niusl",
	},
	{},
};
MODULE_DEVICE_TABLE(of, niu_match);

static struct platform_driver niu_of_driver = {
	.driver = {
		.name = "niu",
		.of_match_table = niu_match,
	},
	.probe		= niu_of_probe,
	.remove		= niu_of_remove,
};

#endif /* CONFIG_SPARC64 */
static int __init niu_init(void)
{
	int err = 0;

	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);

	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);

#ifdef CONFIG_SPARC64
	err = platform_driver_register(&niu_of_driver);
#endif

	if (!err) {
		err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
		if (err)
			platform_driver_unregister(&niu_of_driver);
#endif
	}

	return err;
}

static void __exit niu_exit(void)
{
	pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
	platform_driver_unregister(&niu_of_driver);
#endif
}

module_init(niu_init);
module_exit(niu_exit);