/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/of_device.h>

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"Nov 14, 2008"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
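/* 64-bit register accessors composed from two 32-bit MMIO operations:
 * the low word lives at the register offset and the high word at
 * offset + 4UL (presumably a fallback for hosts without a native
 * readq()/writeq()).
 */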
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}

static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
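/* Register access helpers: nr64()/nw64() operate on the base register
 * space at np->regs, the _mac variants use the separate MAC register
 * base np->mac_regs, and the _ipp/_pcs/_xpcs variants add the
 * corresponding block offsets cached in struct niu.
 */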
#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)
static int serdes_init_10g_serdes(struct niu *np);
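/* Poll-until-clear helpers: each __niu_*wait_bits_clear*() variant reads
 * its register family (MAC, IPP or the base space) until the given bits
 * drop, and the set-and-wait variants log the register name if they never
 * do.  The wrapper macros use BUILD_BUG_ON to reject a non-positive LIMIT
 * or negative DELAY at compile time.
 */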
static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);

		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);

		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);

		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
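/* Interrupts are organized as logical devices (LDNs) grouped into logical
 * device groups (LDGs): niu_ldn_irq_enable() masks or unmasks one LDN
 * (LD_IM0 for the low 64, LD_IM1 above that), niu_enable_ldn_in_ldg()
 * walks the parent's ldg_map to cover every LDN routed to a group, and
 * niu_ldg_rearm() re-arms a group via the ARM bit in its LDG_IMGMT
 * register.
 */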
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;

	if (ldn < 0 || ldn > LDN_MAX)

		mask_reg = LD_IM0(ldn);

		mask_reg = LD_IM1(ldn - 64);

	val = nr64(mask_reg);
static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;

	for (i = 0; i <= LDN_MAX; i++) {

		if (parent->ldg_map[i] != lp->ldg_num)

		err = niu_ldn_irq_enable(np, i, on);
static int niu_enable_interrupts(struct niu *np, int on)
{
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = niu_enable_ldn_in_ldg(np, lp, on);

	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);
static u32 phy_encode(u32 type, int port)
{
	return (type << (port * 2));
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}
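/* The parent's port_phy word packs one PORT_TYPE_* value per port, two
 * bits each: phy_encode() shifts a type into a port's slot and
 * phy_decode() extracts it with PORT_TYPE_MASK.
 */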
static int mdio_wait(struct niu *np)
{
	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;
static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);

static int mii_write(struct niu *np, int port, int reg, int data)
{
	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
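/* mdio_read()/mdio_write() are two-step (clause-45 style) accesses: an
 * MDIO_ADDR_OP frame selects the register, then a separate read or write
 * frame moves the data.  mii_read()/mii_write() issue single MII frames.
 * Completion is detected by mdio_wait(), which polls the turnaround bit
 * in MIF_FRAME_OUTPUT and returns the data field.
 */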
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),

		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),

		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
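/* Each SERDES lane's ESR2 PLL TX/RX configuration spans a _L/_H register
 * pair, so esr2_set_tx_cfg()/esr2_set_rx_cfg() apparently split the
 * 32-bit value into two 16-bit MDIO writes to NIU_ESR2_DEV_ADDR.
 */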
/* Mode is always 10G fiber. */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	u64 uninitialized_var(sig), mask, val;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);

		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);

		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);

		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);

		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
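/* 10G SERDES bring-up for NIU ports.  If the expected signal-ready and
 * detect bits never show up in ESR_INT_SIGNALS, the port retries at 1G
 * via serdes_init_niu_1g_serdes() and switches the MAC transceiver to
 * PCS mode.
 */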
static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	u64 uninitialized_var(sig), mask, val;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);

		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);

		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);

	/* check if serdes is ready */

		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |

		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);

			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;

			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));

		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));

			*val |= ((err & 0xffff) << 16);

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));

		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));

			*val |= ((err & 0xffff) << 16);

static int esr_read_reset(struct niu *np, u32 *val)
{
	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);

		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);

			*val |= ((err & 0xffff) << 16);

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);

		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);

		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));

static int esr_reset(struct niu *np)
{
	u32 uninitialized_var(reset);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);

	err = esr_read_reset(np, &reset);

		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;

		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;

		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;

	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);

		err = esr_read_glue0(np, i, &glue0);

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);

		err = esr_write_glue0(np, i, glue0);

	sig = nr64(ESR_INT_SIGNALS);

		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |

		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;

		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);

	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
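/* 1G mode only touches the SERDES PLL rate bits: clear FBDIV2 and set
 * the per-port HRATE bit in ENET_SERDES_1_PLL_CFG.
 */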
static int serdes_init_1g(struct niu *np)
{
	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;

		val |= ENET_SERDES_PLL_HRATE0;

		val |= ENET_SERDES_PLL_HRATE1;

		val |= ENET_SERDES_PLL_HRATE2;

		val |= ENET_SERDES_PLL_HRATE3;

	nw64(ENET_SERDES_1_PLL_CFG, val);
906 static int serdes_init_1g_serdes(struct niu
*np
)
908 struct niu_link_config
*lp
= &np
->link_config
;
909 unsigned long ctrl_reg
, test_cfg_reg
, pll_cfg
, i
;
910 u64 ctrl_val
, test_cfg_val
, sig
, mask
, val
;
912 u64 reset_val
, val_rd
;
914 val
= ENET_SERDES_PLL_HRATE0
| ENET_SERDES_PLL_HRATE1
|
915 ENET_SERDES_PLL_HRATE2
| ENET_SERDES_PLL_HRATE3
|
916 ENET_SERDES_PLL_FBDIV0
;
919 reset_val
= ENET_SERDES_RESET_0
;
920 ctrl_reg
= ENET_SERDES_0_CTRL_CFG
;
921 test_cfg_reg
= ENET_SERDES_0_TEST_CFG
;
922 pll_cfg
= ENET_SERDES_0_PLL_CFG
;
925 reset_val
= ENET_SERDES_RESET_1
;
926 ctrl_reg
= ENET_SERDES_1_CTRL_CFG
;
927 test_cfg_reg
= ENET_SERDES_1_TEST_CFG
;
928 pll_cfg
= ENET_SERDES_1_PLL_CFG
;
934 ctrl_val
= (ENET_SERDES_CTRL_SDET_0
|
935 ENET_SERDES_CTRL_SDET_1
|
936 ENET_SERDES_CTRL_SDET_2
|
937 ENET_SERDES_CTRL_SDET_3
|
938 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT
) |
939 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT
) |
940 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT
) |
941 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT
) |
942 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT
) |
943 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT
) |
944 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT
) |
945 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT
));
948 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
949 test_cfg_val
|= ((ENET_TEST_MD_PAD_LOOPBACK
<<
950 ENET_SERDES_TEST_MD_0_SHIFT
) |
951 (ENET_TEST_MD_PAD_LOOPBACK
<<
952 ENET_SERDES_TEST_MD_1_SHIFT
) |
953 (ENET_TEST_MD_PAD_LOOPBACK
<<
954 ENET_SERDES_TEST_MD_2_SHIFT
) |
955 (ENET_TEST_MD_PAD_LOOPBACK
<<
956 ENET_SERDES_TEST_MD_3_SHIFT
));
959 nw64(ENET_SERDES_RESET
, reset_val
);
961 val_rd
= nr64(ENET_SERDES_RESET
);
962 val_rd
&= ~reset_val
;
964 nw64(ctrl_reg
, ctrl_val
);
965 nw64(test_cfg_reg
, test_cfg_val
);
966 nw64(ENET_SERDES_RESET
, val_rd
);
969 /* Initialize all 4 lanes of the SERDES. */
970 for (i
= 0; i
< 4; i
++) {
971 u32 rxtx_ctrl
, glue0
;
973 err
= esr_read_rxtx_ctrl(np
, i
, &rxtx_ctrl
);
976 err
= esr_read_glue0(np
, i
, &glue0
);
980 rxtx_ctrl
&= ~(ESR_RXTX_CTRL_VMUXLO
);
981 rxtx_ctrl
|= (ESR_RXTX_CTRL_ENSTRETCH
|
982 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT
));
984 glue0
&= ~(ESR_GLUE_CTRL0_SRATE
|
985 ESR_GLUE_CTRL0_THCNT
|
986 ESR_GLUE_CTRL0_BLTIME
);
987 glue0
|= (ESR_GLUE_CTRL0_RXLOSENAB
|
988 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT
) |
989 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT
) |
990 (BLTIME_300_CYCLES
<<
991 ESR_GLUE_CTRL0_BLTIME_SHIFT
));
993 err
= esr_write_rxtx_ctrl(np
, i
, rxtx_ctrl
);
996 err
= esr_write_glue0(np
, i
, glue0
);
1002 sig
= nr64(ESR_INT_SIGNALS
);
1005 val
= (ESR_INT_SRDY0_P0
| ESR_INT_DET0_P0
);
1010 val
= (ESR_INT_SRDY0_P1
| ESR_INT_DET0_P1
);
1018 if ((sig
& mask
) != val
) {
1019 netdev_err(np
->dev
, "Port %u signal bits [%08x] are not [%08x]\n",
1020 np
->port
, (int)(sig
& mask
), (int)val
);
1027 static int link_status_1g_serdes(struct niu
*np
, int *link_up_p
)
1029 struct niu_link_config
*lp
= &np
->link_config
;
1033 unsigned long flags
;
1037 current_speed
= SPEED_INVALID
;
1038 current_duplex
= DUPLEX_INVALID
;
1040 spin_lock_irqsave(&np
->lock
, flags
);
1042 val
= nr64_pcs(PCS_MII_STAT
);
1044 if (val
& PCS_MII_STAT_LINK_STATUS
) {
1046 current_speed
= SPEED_1000
;
1047 current_duplex
= DUPLEX_FULL
;
1050 lp
->active_speed
= current_speed
;
1051 lp
->active_duplex
= current_duplex
;
1052 spin_unlock_irqrestore(&np
->lock
, flags
);
1054 *link_up_p
= link_up
;
1058 static int link_status_10g_serdes(struct niu
*np
, int *link_up_p
)
1060 unsigned long flags
;
1061 struct niu_link_config
*lp
= &np
->link_config
;
1068 if (!(np
->flags
& NIU_FLAGS_10G
))
1069 return link_status_1g_serdes(np
, link_up_p
);
1071 current_speed
= SPEED_INVALID
;
1072 current_duplex
= DUPLEX_INVALID
;
1073 spin_lock_irqsave(&np
->lock
, flags
);
1075 val
= nr64_xpcs(XPCS_STATUS(0));
1076 val2
= nr64_mac(XMAC_INTER2
);
1077 if (val2
& 0x01000000)
1080 if ((val
& 0x1000ULL
) && link_ok
) {
1082 current_speed
= SPEED_10000
;
1083 current_duplex
= DUPLEX_FULL
;
1085 lp
->active_speed
= current_speed
;
1086 lp
->active_duplex
= current_duplex
;
1087 spin_unlock_irqrestore(&np
->lock
, flags
);
1088 *link_up_p
= link_up
;
1092 static int link_status_mii(struct niu
*np
, int *link_up_p
)
1094 struct niu_link_config
*lp
= &np
->link_config
;
1096 int bmsr
, advert
, ctrl1000
, stat1000
, lpa
, bmcr
, estatus
;
1097 int supported
, advertising
, active_speed
, active_duplex
;
1099 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1100 if (unlikely(err
< 0))
1104 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1105 if (unlikely(err
< 0))
1109 err
= mii_read(np
, np
->phy_addr
, MII_ADVERTISE
);
1110 if (unlikely(err
< 0))
1114 err
= mii_read(np
, np
->phy_addr
, MII_LPA
);
1115 if (unlikely(err
< 0))
1119 if (likely(bmsr
& BMSR_ESTATEN
)) {
1120 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1121 if (unlikely(err
< 0))
1125 err
= mii_read(np
, np
->phy_addr
, MII_CTRL1000
);
1126 if (unlikely(err
< 0))
1130 err
= mii_read(np
, np
->phy_addr
, MII_STAT1000
);
1131 if (unlikely(err
< 0))
1135 estatus
= ctrl1000
= stat1000
= 0;
1138 if (bmsr
& BMSR_ANEGCAPABLE
)
1139 supported
|= SUPPORTED_Autoneg
;
1140 if (bmsr
& BMSR_10HALF
)
1141 supported
|= SUPPORTED_10baseT_Half
;
1142 if (bmsr
& BMSR_10FULL
)
1143 supported
|= SUPPORTED_10baseT_Full
;
1144 if (bmsr
& BMSR_100HALF
)
1145 supported
|= SUPPORTED_100baseT_Half
;
1146 if (bmsr
& BMSR_100FULL
)
1147 supported
|= SUPPORTED_100baseT_Full
;
1148 if (estatus
& ESTATUS_1000_THALF
)
1149 supported
|= SUPPORTED_1000baseT_Half
;
1150 if (estatus
& ESTATUS_1000_TFULL
)
1151 supported
|= SUPPORTED_1000baseT_Full
;
1152 lp
->supported
= supported
;
1155 if (advert
& ADVERTISE_10HALF
)
1156 advertising
|= ADVERTISED_10baseT_Half
;
1157 if (advert
& ADVERTISE_10FULL
)
1158 advertising
|= ADVERTISED_10baseT_Full
;
1159 if (advert
& ADVERTISE_100HALF
)
1160 advertising
|= ADVERTISED_100baseT_Half
;
1161 if (advert
& ADVERTISE_100FULL
)
1162 advertising
|= ADVERTISED_100baseT_Full
;
1163 if (ctrl1000
& ADVERTISE_1000HALF
)
1164 advertising
|= ADVERTISED_1000baseT_Half
;
1165 if (ctrl1000
& ADVERTISE_1000FULL
)
1166 advertising
|= ADVERTISED_1000baseT_Full
;
1168 if (bmcr
& BMCR_ANENABLE
) {
1171 lp
->active_autoneg
= 1;
1172 advertising
|= ADVERTISED_Autoneg
;
1175 neg1000
= (ctrl1000
<< 2) & stat1000
;
1177 if (neg1000
& (LPA_1000FULL
| LPA_1000HALF
))
1178 active_speed
= SPEED_1000
;
1179 else if (neg
& LPA_100
)
1180 active_speed
= SPEED_100
;
1181 else if (neg
& (LPA_10HALF
| LPA_10FULL
))
1182 active_speed
= SPEED_10
;
1184 active_speed
= SPEED_INVALID
;
1186 if ((neg1000
& LPA_1000FULL
) || (neg
& LPA_DUPLEX
))
1187 active_duplex
= DUPLEX_FULL
;
1188 else if (active_speed
!= SPEED_INVALID
)
1189 active_duplex
= DUPLEX_HALF
;
1191 active_duplex
= DUPLEX_INVALID
;
1193 lp
->active_autoneg
= 0;
1195 if ((bmcr
& BMCR_SPEED1000
) && !(bmcr
& BMCR_SPEED100
))
1196 active_speed
= SPEED_1000
;
1197 else if (bmcr
& BMCR_SPEED100
)
1198 active_speed
= SPEED_100
;
1200 active_speed
= SPEED_10
;
1202 if (bmcr
& BMCR_FULLDPLX
)
1203 active_duplex
= DUPLEX_FULL
;
1205 active_duplex
= DUPLEX_HALF
;
1208 lp
->active_advertising
= advertising
;
1209 lp
->active_speed
= active_speed
;
1210 lp
->active_duplex
= active_duplex
;
1211 *link_up_p
= !!(bmsr
& BMSR_LSTATUS
);
1216 static int link_status_1g_rgmii(struct niu
*np
, int *link_up_p
)
1218 struct niu_link_config
*lp
= &np
->link_config
;
1219 u16 current_speed
, bmsr
;
1220 unsigned long flags
;
1225 current_speed
= SPEED_INVALID
;
1226 current_duplex
= DUPLEX_INVALID
;
1228 spin_lock_irqsave(&np
->lock
, flags
);
1232 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1237 if (bmsr
& BMSR_LSTATUS
) {
1238 u16 adv
, lpa
, common
, estat
;
1240 err
= mii_read(np
, np
->phy_addr
, MII_ADVERTISE
);
1245 err
= mii_read(np
, np
->phy_addr
, MII_LPA
);
1252 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1257 current_speed
= SPEED_1000
;
1258 current_duplex
= DUPLEX_FULL
;
1261 lp
->active_speed
= current_speed
;
1262 lp
->active_duplex
= current_duplex
;
1266 spin_unlock_irqrestore(&np
->lock
, flags
);
1268 *link_up_p
= link_up
;
1272 static int link_status_1g(struct niu
*np
, int *link_up_p
)
1274 struct niu_link_config
*lp
= &np
->link_config
;
1275 unsigned long flags
;
1278 spin_lock_irqsave(&np
->lock
, flags
);
1280 err
= link_status_mii(np
, link_up_p
);
1281 lp
->supported
|= SUPPORTED_TP
;
1282 lp
->active_advertising
|= ADVERTISED_TP
;
1284 spin_unlock_irqrestore(&np
->lock
, flags
);
1288 static int bcm8704_reset(struct niu
*np
)
1292 err
= mdio_read(np
, np
->phy_addr
,
1293 BCM8704_PHYXS_DEV_ADDR
, MII_BMCR
);
1294 if (err
< 0 || err
== 0xffff)
1297 err
= mdio_write(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
1303 while (--limit
>= 0) {
1304 err
= mdio_read(np
, np
->phy_addr
,
1305 BCM8704_PHYXS_DEV_ADDR
, MII_BMCR
);
1308 if (!(err
& BMCR_RESET
))
1312 netdev_err(np
->dev
, "Port %u PHY will not reset (bmcr=%04x)\n",
1313 np
->port
, (err
& 0xffff));
1319 /* When written, certain PHY registers need to be read back twice
1320 * in order for the bits to settle properly.
1322 static int bcm8704_user_dev3_readback(struct niu
*np
, int reg
)
1324 int err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, reg
);
1327 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, reg
);
1333 static int bcm8706_init_user_dev3(struct niu
*np
)
1338 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1339 BCM8704_USER_OPT_DIGITAL_CTRL
);
1342 err
&= ~USER_ODIG_CTRL_GPIOS
;
1343 err
|= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT
);
1344 err
|= USER_ODIG_CTRL_RESV2
;
1345 err
= mdio_write(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1346 BCM8704_USER_OPT_DIGITAL_CTRL
, err
);
1355 static int bcm8704_init_user_dev3(struct niu
*np
)
1359 err
= mdio_write(np
, np
->phy_addr
,
1360 BCM8704_USER_DEV3_ADDR
, BCM8704_USER_CONTROL
,
1361 (USER_CONTROL_OPTXRST_LVL
|
1362 USER_CONTROL_OPBIASFLT_LVL
|
1363 USER_CONTROL_OBTMPFLT_LVL
|
1364 USER_CONTROL_OPPRFLT_LVL
|
1365 USER_CONTROL_OPTXFLT_LVL
|
1366 USER_CONTROL_OPRXLOS_LVL
|
1367 USER_CONTROL_OPRXFLT_LVL
|
1368 USER_CONTROL_OPTXON_LVL
|
1369 (0x3f << USER_CONTROL_RES1_SHIFT
)));
1373 err
= mdio_write(np
, np
->phy_addr
,
1374 BCM8704_USER_DEV3_ADDR
, BCM8704_USER_PMD_TX_CONTROL
,
1375 (USER_PMD_TX_CTL_XFP_CLKEN
|
1376 (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH
) |
1377 (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH
) |
1378 USER_PMD_TX_CTL_TSCK_LPWREN
));
1382 err
= bcm8704_user_dev3_readback(np
, BCM8704_USER_CONTROL
);
1385 err
= bcm8704_user_dev3_readback(np
, BCM8704_USER_PMD_TX_CONTROL
);
1389 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1390 BCM8704_USER_OPT_DIGITAL_CTRL
);
1393 err
&= ~USER_ODIG_CTRL_GPIOS
;
1394 err
|= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT
);
1395 err
= mdio_write(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1396 BCM8704_USER_OPT_DIGITAL_CTRL
, err
);
1405 static int mrvl88x2011_act_led(struct niu
*np
, int val
)
1409 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1410 MRVL88X2011_LED_8_TO_11_CTL
);
1414 err
&= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT
,MRVL88X2011_LED_CTL_MASK
);
1415 err
|= MRVL88X2011_LED(MRVL88X2011_LED_ACT
,val
);
1417 return mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1418 MRVL88X2011_LED_8_TO_11_CTL
, err
);
1421 static int mrvl88x2011_led_blink_rate(struct niu
*np
, int rate
)
1425 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1426 MRVL88X2011_LED_BLINK_CTL
);
1428 err
&= ~MRVL88X2011_LED_BLKRATE_MASK
;
1431 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1432 MRVL88X2011_LED_BLINK_CTL
, err
);
1438 static int xcvr_init_10g_mrvl88x2011(struct niu
*np
)
1442 /* Set LED functions */
1443 err
= mrvl88x2011_led_blink_rate(np
, MRVL88X2011_LED_BLKRATE_134MS
);
1448 err
= mrvl88x2011_act_led(np
, MRVL88X2011_LED_CTL_OFF
);
1452 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1453 MRVL88X2011_GENERAL_CTL
);
1457 err
|= MRVL88X2011_ENA_XFPREFCLK
;
1459 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1460 MRVL88X2011_GENERAL_CTL
, err
);
1464 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1465 MRVL88X2011_PMA_PMD_CTL_1
);
1469 if (np
->link_config
.loopback_mode
== LOOPBACK_MAC
)
1470 err
|= MRVL88X2011_LOOPBACK
;
1472 err
&= ~MRVL88X2011_LOOPBACK
;
1474 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1475 MRVL88X2011_PMA_PMD_CTL_1
, err
);
1480 return mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1481 MRVL88X2011_10G_PMD_TX_DIS
, MRVL88X2011_ENA_PMDTX
);
1485 static int xcvr_diag_bcm870x(struct niu
*np
)
1487 u16 analog_stat0
, tx_alarm_status
;
1491 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
1495 pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np
->port
, err
);
1497 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, 0x20);
1500 pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np
->port
, err
);
1502 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
1506 pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np
->port
, err
);
1509 /* XXX dig this out it might not be so useful XXX */
1510 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1511 BCM8704_USER_ANALOG_STATUS0
);
1514 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1515 BCM8704_USER_ANALOG_STATUS0
);
1520 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1521 BCM8704_USER_TX_ALARM_STATUS
);
1524 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1525 BCM8704_USER_TX_ALARM_STATUS
);
1528 tx_alarm_status
= err
;
1530 if (analog_stat0
!= 0x03fc) {
1531 if ((analog_stat0
== 0x43bc) && (tx_alarm_status
!= 0)) {
1532 pr_info("Port %u cable not connected or bad cable\n",
1534 } else if (analog_stat0
== 0x639c) {
1535 pr_info("Port %u optical module is bad or missing\n",
1543 static int xcvr_10g_set_lb_bcm870x(struct niu
*np
)
1545 struct niu_link_config
*lp
= &np
->link_config
;
1548 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
1553 err
&= ~BMCR_LOOPBACK
;
1555 if (lp
->loopback_mode
== LOOPBACK_MAC
)
1556 err
|= BMCR_LOOPBACK
;
1558 err
= mdio_write(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
1566 static int xcvr_init_10g_bcm8706(struct niu
*np
)
1571 if ((np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) &&
1572 (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) == 0)
1575 val
= nr64_mac(XMAC_CONFIG
);
1576 val
&= ~XMAC_CONFIG_LED_POLARITY
;
1577 val
|= XMAC_CONFIG_FORCE_LED_ON
;
1578 nw64_mac(XMAC_CONFIG
, val
);
1580 val
= nr64(MIF_CONFIG
);
1581 val
|= MIF_CONFIG_INDIRECT_MODE
;
1582 nw64(MIF_CONFIG
, val
);
1584 err
= bcm8704_reset(np
);
1588 err
= xcvr_10g_set_lb_bcm870x(np
);
1592 err
= bcm8706_init_user_dev3(np
);
1596 err
= xcvr_diag_bcm870x(np
);
1603 static int xcvr_init_10g_bcm8704(struct niu
*np
)
1607 err
= bcm8704_reset(np
);
1611 err
= bcm8704_init_user_dev3(np
);
1615 err
= xcvr_10g_set_lb_bcm870x(np
);
1619 err
= xcvr_diag_bcm870x(np
);
1626 static int xcvr_init_10g(struct niu
*np
)
1631 val
= nr64_mac(XMAC_CONFIG
);
1632 val
&= ~XMAC_CONFIG_LED_POLARITY
;
1633 val
|= XMAC_CONFIG_FORCE_LED_ON
;
1634 nw64_mac(XMAC_CONFIG
, val
);
1636 /* XXX shared resource, lock parent XXX */
1637 val
= nr64(MIF_CONFIG
);
1638 val
|= MIF_CONFIG_INDIRECT_MODE
;
1639 nw64(MIF_CONFIG
, val
);
1641 phy_id
= phy_decode(np
->parent
->port_phy
, np
->port
);
1642 phy_id
= np
->parent
->phy_probe_info
.phy_id
[phy_id
][np
->port
];
1644 /* handle different phy types */
1645 switch (phy_id
& NIU_PHY_ID_MASK
) {
1646 case NIU_PHY_ID_MRVL88X2011
:
1647 err
= xcvr_init_10g_mrvl88x2011(np
);
1650 default: /* bcom 8704 */
1651 err
= xcvr_init_10g_bcm8704(np
);
1658 static int mii_reset(struct niu
*np
)
1662 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, BMCR_RESET
);
1667 while (--limit
>= 0) {
1669 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1672 if (!(err
& BMCR_RESET
))
1676 netdev_err(np
->dev
, "Port %u MII would not reset, bmcr[%04x]\n",
1684 static int xcvr_init_1g_rgmii(struct niu
*np
)
1688 u16 bmcr
, bmsr
, estat
;
1690 val
= nr64(MIF_CONFIG
);
1691 val
&= ~MIF_CONFIG_INDIRECT_MODE
;
1692 nw64(MIF_CONFIG
, val
);
1694 err
= mii_reset(np
);
1698 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1704 if (bmsr
& BMSR_ESTATEN
) {
1705 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1712 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1716 if (bmsr
& BMSR_ESTATEN
) {
1719 if (estat
& ESTATUS_1000_TFULL
)
1720 ctrl1000
|= ADVERTISE_1000FULL
;
1721 err
= mii_write(np
, np
->phy_addr
, MII_CTRL1000
, ctrl1000
);
1726 bmcr
= (BMCR_SPEED1000
| BMCR_FULLDPLX
);
1728 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1732 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1735 bmcr
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1737 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1744 static int mii_init_common(struct niu
*np
)
1746 struct niu_link_config
*lp
= &np
->link_config
;
1747 u16 bmcr
, bmsr
, adv
, estat
;
1750 err
= mii_reset(np
);
1754 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1760 if (bmsr
& BMSR_ESTATEN
) {
1761 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1768 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1772 if (lp
->loopback_mode
== LOOPBACK_MAC
) {
1773 bmcr
|= BMCR_LOOPBACK
;
1774 if (lp
->active_speed
== SPEED_1000
)
1775 bmcr
|= BMCR_SPEED1000
;
1776 if (lp
->active_duplex
== DUPLEX_FULL
)
1777 bmcr
|= BMCR_FULLDPLX
;
1780 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
1783 aux
= (BCM5464R_AUX_CTL_EXT_LB
|
1784 BCM5464R_AUX_CTL_WRITE_1
);
1785 err
= mii_write(np
, np
->phy_addr
, BCM5464R_AUX_CTL
, aux
);
1793 adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1794 if ((bmsr
& BMSR_10HALF
) &&
1795 (lp
->advertising
& ADVERTISED_10baseT_Half
))
1796 adv
|= ADVERTISE_10HALF
;
1797 if ((bmsr
& BMSR_10FULL
) &&
1798 (lp
->advertising
& ADVERTISED_10baseT_Full
))
1799 adv
|= ADVERTISE_10FULL
;
1800 if ((bmsr
& BMSR_100HALF
) &&
1801 (lp
->advertising
& ADVERTISED_100baseT_Half
))
1802 adv
|= ADVERTISE_100HALF
;
1803 if ((bmsr
& BMSR_100FULL
) &&
1804 (lp
->advertising
& ADVERTISED_100baseT_Full
))
1805 adv
|= ADVERTISE_100FULL
;
1806 err
= mii_write(np
, np
->phy_addr
, MII_ADVERTISE
, adv
);
1810 if (likely(bmsr
& BMSR_ESTATEN
)) {
1812 if ((estat
& ESTATUS_1000_THALF
) &&
1813 (lp
->advertising
& ADVERTISED_1000baseT_Half
))
1814 ctrl1000
|= ADVERTISE_1000HALF
;
1815 if ((estat
& ESTATUS_1000_TFULL
) &&
1816 (lp
->advertising
& ADVERTISED_1000baseT_Full
))
1817 ctrl1000
|= ADVERTISE_1000FULL
;
1818 err
= mii_write(np
, np
->phy_addr
,
1819 MII_CTRL1000
, ctrl1000
);
1824 bmcr
|= (BMCR_ANENABLE
| BMCR_ANRESTART
);
1829 if (lp
->duplex
== DUPLEX_FULL
) {
1830 bmcr
|= BMCR_FULLDPLX
;
1832 } else if (lp
->duplex
== DUPLEX_HALF
)
1837 if (lp
->speed
== SPEED_1000
) {
1838 /* if X-full requested while not supported, or
1839 X-half requested while not supported... */
1840 if ((fulldpx
&& !(estat
& ESTATUS_1000_TFULL
)) ||
1841 (!fulldpx
&& !(estat
& ESTATUS_1000_THALF
)))
1843 bmcr
|= BMCR_SPEED1000
;
1844 } else if (lp
->speed
== SPEED_100
) {
1845 if ((fulldpx
&& !(bmsr
& BMSR_100FULL
)) ||
1846 (!fulldpx
&& !(bmsr
& BMSR_100HALF
)))
1848 bmcr
|= BMCR_SPEED100
;
1849 } else if (lp
->speed
== SPEED_10
) {
1850 if ((fulldpx
&& !(bmsr
& BMSR_10FULL
)) ||
1851 (!fulldpx
&& !(bmsr
& BMSR_10HALF
)))
1857 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1862 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1867 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1872 pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
1873 np
->port
, bmcr
, bmsr
);
1879 static int xcvr_init_1g(struct niu
*np
)
1883 /* XXX shared resource, lock parent XXX */
1884 val
= nr64(MIF_CONFIG
);
1885 val
&= ~MIF_CONFIG_INDIRECT_MODE
;
1886 nw64(MIF_CONFIG
, val
);
1888 return mii_init_common(np
);
1891 static int niu_xcvr_init(struct niu
*np
)
1893 const struct niu_phy_ops
*ops
= np
->phy_ops
;
1898 err
= ops
->xcvr_init(np
);
1903 static int niu_serdes_init(struct niu
*np
)
1905 const struct niu_phy_ops
*ops
= np
->phy_ops
;
1909 if (ops
->serdes_init
)
1910 err
= ops
->serdes_init(np
);
1915 static void niu_init_xif(struct niu
*);
1916 static void niu_handle_led(struct niu
*, int status
);
1918 static int niu_link_status_common(struct niu
*np
, int link_up
)
1920 struct niu_link_config
*lp
= &np
->link_config
;
1921 struct net_device
*dev
= np
->dev
;
1922 unsigned long flags
;
1924 if (!netif_carrier_ok(dev
) && link_up
) {
1925 netif_info(np
, link
, dev
, "Link is up at %s, %s duplex\n",
1926 lp
->active_speed
== SPEED_10000
? "10Gb/sec" :
1927 lp
->active_speed
== SPEED_1000
? "1Gb/sec" :
1928 lp
->active_speed
== SPEED_100
? "100Mbit/sec" :
1930 lp
->active_duplex
== DUPLEX_FULL
? "full" : "half");
1932 spin_lock_irqsave(&np
->lock
, flags
);
1934 niu_handle_led(np
, 1);
1935 spin_unlock_irqrestore(&np
->lock
, flags
);
1937 netif_carrier_on(dev
);
1938 } else if (netif_carrier_ok(dev
) && !link_up
) {
1939 netif_warn(np
, link
, dev
, "Link is down\n");
1940 spin_lock_irqsave(&np
->lock
, flags
);
1941 niu_handle_led(np
, 0);
1942 spin_unlock_irqrestore(&np
->lock
, flags
);
1943 netif_carrier_off(dev
);
1949 static int link_status_10g_mrvl(struct niu
*np
, int *link_up_p
)
1951 int err
, link_up
, pma_status
, pcs_status
;
1955 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1956 MRVL88X2011_10G_PMD_STATUS_2
);
1960 /* Check PMA/PMD Register: 1.0001.2 == 1 */
1961 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1962 MRVL88X2011_PMA_PMD_STATUS_1
);
1966 pma_status
= ((err
& MRVL88X2011_LNK_STATUS_OK
) ? 1 : 0);
1968 /* Check PMC Register : 3.0001.2 == 1: read twice */
1969 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1970 MRVL88X2011_PMA_PMD_STATUS_1
);
1974 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1975 MRVL88X2011_PMA_PMD_STATUS_1
);
1979 pcs_status
= ((err
& MRVL88X2011_LNK_STATUS_OK
) ? 1 : 0);
1981 /* Check XGXS Register : 4.0018.[0-3,12] */
1982 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV4_ADDR
,
1983 MRVL88X2011_10G_XGXS_LANE_STAT
);
1987 if (err
== (PHYXS_XGXS_LANE_STAT_ALINGED
| PHYXS_XGXS_LANE_STAT_LANE3
|
1988 PHYXS_XGXS_LANE_STAT_LANE2
| PHYXS_XGXS_LANE_STAT_LANE1
|
1989 PHYXS_XGXS_LANE_STAT_LANE0
| PHYXS_XGXS_LANE_STAT_MAGIC
|
1991 link_up
= (pma_status
&& pcs_status
) ? 1 : 0;
1993 np
->link_config
.active_speed
= SPEED_10000
;
1994 np
->link_config
.active_duplex
= DUPLEX_FULL
;
1997 mrvl88x2011_act_led(np
, (link_up
?
1998 MRVL88X2011_LED_CTL_PCS_ACT
:
1999 MRVL88X2011_LED_CTL_OFF
));
2001 *link_up_p
= link_up
;
2005 static int link_status_10g_bcm8706(struct niu
*np
, int *link_up_p
)
2010 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
2011 BCM8704_PMD_RCV_SIGDET
);
2012 if (err
< 0 || err
== 0xffff)
2014 if (!(err
& PMD_RCV_SIGDET_GLOBAL
)) {
2019 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
2020 BCM8704_PCS_10G_R_STATUS
);
2024 if (!(err
& PCS_10G_R_STATUS_BLK_LOCK
)) {
2029 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
2030 BCM8704_PHYXS_XGXS_LANE_STAT
);
2033 if (err
!= (PHYXS_XGXS_LANE_STAT_ALINGED
|
2034 PHYXS_XGXS_LANE_STAT_MAGIC
|
2035 PHYXS_XGXS_LANE_STAT_PATTEST
|
2036 PHYXS_XGXS_LANE_STAT_LANE3
|
2037 PHYXS_XGXS_LANE_STAT_LANE2
|
2038 PHYXS_XGXS_LANE_STAT_LANE1
|
2039 PHYXS_XGXS_LANE_STAT_LANE0
)) {
2041 np
->link_config
.active_speed
= SPEED_INVALID
;
2042 np
->link_config
.active_duplex
= DUPLEX_INVALID
;
2047 np
->link_config
.active_speed
= SPEED_10000
;
2048 np
->link_config
.active_duplex
= DUPLEX_FULL
;
2052 *link_up_p
= link_up
;
2056 static int link_status_10g_bcom(struct niu
*np
, int *link_up_p
)
2062 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
2063 BCM8704_PMD_RCV_SIGDET
);
2066 if (!(err
& PMD_RCV_SIGDET_GLOBAL
)) {
2071 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
2072 BCM8704_PCS_10G_R_STATUS
);
2075 if (!(err
& PCS_10G_R_STATUS_BLK_LOCK
)) {
2080 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
2081 BCM8704_PHYXS_XGXS_LANE_STAT
);
2085 if (err
!= (PHYXS_XGXS_LANE_STAT_ALINGED
|
2086 PHYXS_XGXS_LANE_STAT_MAGIC
|
2087 PHYXS_XGXS_LANE_STAT_LANE3
|
2088 PHYXS_XGXS_LANE_STAT_LANE2
|
2089 PHYXS_XGXS_LANE_STAT_LANE1
|
2090 PHYXS_XGXS_LANE_STAT_LANE0
)) {
2096 np
->link_config
.active_speed
= SPEED_10000
;
2097 np
->link_config
.active_duplex
= DUPLEX_FULL
;
2101 *link_up_p
= link_up
;
2105 static int link_status_10g(struct niu
*np
, int *link_up_p
)
2107 unsigned long flags
;
2110 spin_lock_irqsave(&np
->lock
, flags
);
2112 if (np
->link_config
.loopback_mode
== LOOPBACK_DISABLED
) {
2115 phy_id
= phy_decode(np
->parent
->port_phy
, np
->port
);
2116 phy_id
= np
->parent
->phy_probe_info
.phy_id
[phy_id
][np
->port
];
2118 /* handle different phy types */
2119 switch (phy_id
& NIU_PHY_ID_MASK
) {
2120 case NIU_PHY_ID_MRVL88X2011
:
2121 err
= link_status_10g_mrvl(np
, link_up_p
);
2124 default: /* bcom 8704 */
2125 err
= link_status_10g_bcom(np
, link_up_p
);
2130 spin_unlock_irqrestore(&np
->lock
, flags
);
2135 static int niu_10g_phy_present(struct niu
*np
)
2139 sig
= nr64(ESR_INT_SIGNALS
);
2142 mask
= ESR_INT_SIGNALS_P0_BITS
;
2143 val
= (ESR_INT_SRDY0_P0
|
2146 ESR_INT_XDP_P0_CH3
|
2147 ESR_INT_XDP_P0_CH2
|
2148 ESR_INT_XDP_P0_CH1
|
2149 ESR_INT_XDP_P0_CH0
);
2153 mask
= ESR_INT_SIGNALS_P1_BITS
;
2154 val
= (ESR_INT_SRDY0_P1
|
2157 ESR_INT_XDP_P1_CH3
|
2158 ESR_INT_XDP_P1_CH2
|
2159 ESR_INT_XDP_P1_CH1
|
2160 ESR_INT_XDP_P1_CH0
);
2167 if ((sig
& mask
) != val
)
2172 static int link_status_10g_hotplug(struct niu
*np
, int *link_up_p
)
2174 unsigned long flags
;
2177 int phy_present_prev
;
2179 spin_lock_irqsave(&np
->lock
, flags
);
2181 if (np
->link_config
.loopback_mode
== LOOPBACK_DISABLED
) {
2182 phy_present_prev
= (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) ?
2184 phy_present
= niu_10g_phy_present(np
);
2185 if (phy_present
!= phy_present_prev
) {
2188 /* A NEM was just plugged in */
2189 np
->flags
|= NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2190 if (np
->phy_ops
->xcvr_init
)
2191 err
= np
->phy_ops
->xcvr_init(np
);
2193 err
= mdio_read(np
, np
->phy_addr
,
2194 BCM8704_PHYXS_DEV_ADDR
, MII_BMCR
);
2195 if (err
== 0xffff) {
2196 /* No mdio, back-to-back XAUI */
2200 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2203 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2205 netif_warn(np
, link
, np
->dev
,
2206 "Hotplug PHY Removed\n");
2210 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) {
2211 err
= link_status_10g_bcm8706(np
, link_up_p
);
2212 if (err
== 0xffff) {
2213 /* No mdio, back-to-back XAUI: it is C10NEM */
2215 np
->link_config
.active_speed
= SPEED_10000
;
2216 np
->link_config
.active_duplex
= DUPLEX_FULL
;
2221 spin_unlock_irqrestore(&np
->lock
, flags
);
2226 static int niu_link_status(struct niu
*np
, int *link_up_p
)
2228 const struct niu_phy_ops
*ops
= np
->phy_ops
;
2232 if (ops
->link_status
)
2233 err
= ops
->link_status(np
, link_up_p
);
2238 static void niu_timer(unsigned long __opaque
)
2240 struct niu
*np
= (struct niu
*) __opaque
;
2244 err
= niu_link_status(np
, &link_up
);
2246 niu_link_status_common(np
, link_up
);
2248 if (netif_carrier_ok(np
->dev
))
2252 np
->timer
.expires
= jiffies
+ off
;
2254 add_timer(&np
->timer
);
2257 static const struct niu_phy_ops phy_ops_10g_serdes
= {
2258 .serdes_init
= serdes_init_10g_serdes
,
2259 .link_status
= link_status_10g_serdes
,
2262 static const struct niu_phy_ops phy_ops_10g_serdes_niu
= {
2263 .serdes_init
= serdes_init_niu_10g_serdes
,
2264 .link_status
= link_status_10g_serdes
,
2267 static const struct niu_phy_ops phy_ops_1g_serdes_niu
= {
2268 .serdes_init
= serdes_init_niu_1g_serdes
,
2269 .link_status
= link_status_1g_serdes
,
2272 static const struct niu_phy_ops phy_ops_1g_rgmii
= {
2273 .xcvr_init
= xcvr_init_1g_rgmii
,
2274 .link_status
= link_status_1g_rgmii
,
2277 static const struct niu_phy_ops phy_ops_10g_fiber_niu
= {
2278 .serdes_init
= serdes_init_niu_10g_fiber
,
2279 .xcvr_init
= xcvr_init_10g
,
2280 .link_status
= link_status_10g
,
2283 static const struct niu_phy_ops phy_ops_10g_fiber
= {
2284 .serdes_init
= serdes_init_10g
,
2285 .xcvr_init
= xcvr_init_10g
,
2286 .link_status
= link_status_10g
,
2289 static const struct niu_phy_ops phy_ops_10g_fiber_hotplug
= {
2290 .serdes_init
= serdes_init_10g
,
2291 .xcvr_init
= xcvr_init_10g_bcm8706
,
2292 .link_status
= link_status_10g_hotplug
,
2295 static const struct niu_phy_ops phy_ops_niu_10g_hotplug
= {
2296 .serdes_init
= serdes_init_niu_10g_fiber
,
2297 .xcvr_init
= xcvr_init_10g_bcm8706
,
2298 .link_status
= link_status_10g_hotplug
,
2301 static const struct niu_phy_ops phy_ops_10g_copper
= {
2302 .serdes_init
= serdes_init_10g
,
2303 .link_status
= link_status_10g
, /* XXX */
2306 static const struct niu_phy_ops phy_ops_1g_fiber
= {
2307 .serdes_init
= serdes_init_1g
,
2308 .xcvr_init
= xcvr_init_1g
,
2309 .link_status
= link_status_1g
,
2312 static const struct niu_phy_ops phy_ops_1g_copper
= {
2313 .xcvr_init
= xcvr_init_1g
,
2314 .link_status
= link_status_1g
,
2317 struct niu_phy_template
{
2318 const struct niu_phy_ops
*ops
;
2322 static const struct niu_phy_template phy_template_niu_10g_fiber
= {
2323 .ops
= &phy_ops_10g_fiber_niu
,
2324 .phy_addr_base
= 16,
2327 static const struct niu_phy_template phy_template_niu_10g_serdes
= {
2328 .ops
= &phy_ops_10g_serdes_niu
,
2332 static const struct niu_phy_template phy_template_niu_1g_serdes
= {
2333 .ops
= &phy_ops_1g_serdes_niu
,
2337 static const struct niu_phy_template phy_template_10g_fiber
= {
2338 .ops
= &phy_ops_10g_fiber
,
2342 static const struct niu_phy_template phy_template_10g_fiber_hotplug
= {
2343 .ops
= &phy_ops_10g_fiber_hotplug
,
2347 static const struct niu_phy_template phy_template_niu_10g_hotplug
= {
2348 .ops
= &phy_ops_niu_10g_hotplug
,
2352 static const struct niu_phy_template phy_template_10g_copper
= {
2353 .ops
= &phy_ops_10g_copper
,
2354 .phy_addr_base
= 10,
2357 static const struct niu_phy_template phy_template_1g_fiber
= {
2358 .ops
= &phy_ops_1g_fiber
,
2362 static const struct niu_phy_template phy_template_1g_copper
= {
2363 .ops
= &phy_ops_1g_copper
,
2367 static const struct niu_phy_template phy_template_1g_rgmii
= {
2368 .ops
= &phy_ops_1g_rgmii
,
2372 static const struct niu_phy_template phy_template_10g_serdes
= {
2373 .ops
= &phy_ops_10g_serdes
,
2377 static int niu_atca_port_num
[4] = {
2381 static int serdes_init_10g_serdes(struct niu
*np
)
2383 struct niu_link_config
*lp
= &np
->link_config
;
2384 unsigned long ctrl_reg
, test_cfg_reg
, pll_cfg
, i
;
2385 u64 ctrl_val
, test_cfg_val
, sig
, mask
, val
;
2390 reset_val
= ENET_SERDES_RESET_0
;
2391 ctrl_reg
= ENET_SERDES_0_CTRL_CFG
;
2392 test_cfg_reg
= ENET_SERDES_0_TEST_CFG
;
2393 pll_cfg
= ENET_SERDES_0_PLL_CFG
;
2396 reset_val
= ENET_SERDES_RESET_1
;
2397 ctrl_reg
= ENET_SERDES_1_CTRL_CFG
;
2398 test_cfg_reg
= ENET_SERDES_1_TEST_CFG
;
2399 pll_cfg
= ENET_SERDES_1_PLL_CFG
;
2405 ctrl_val
= (ENET_SERDES_CTRL_SDET_0
|
2406 ENET_SERDES_CTRL_SDET_1
|
2407 ENET_SERDES_CTRL_SDET_2
|
2408 ENET_SERDES_CTRL_SDET_3
|
2409 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT
) |
2410 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT
) |
2411 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT
) |
2412 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT
) |
2413 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT
) |
2414 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT
) |
2415 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT
) |
2416 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT
));
2419 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
2420 test_cfg_val
|= ((ENET_TEST_MD_PAD_LOOPBACK
<<
2421 ENET_SERDES_TEST_MD_0_SHIFT
) |
2422 (ENET_TEST_MD_PAD_LOOPBACK
<<
2423 ENET_SERDES_TEST_MD_1_SHIFT
) |
2424 (ENET_TEST_MD_PAD_LOOPBACK
<<
2425 ENET_SERDES_TEST_MD_2_SHIFT
) |
2426 (ENET_TEST_MD_PAD_LOOPBACK
<<
2427 ENET_SERDES_TEST_MD_3_SHIFT
));
2431 nw64(pll_cfg
, ENET_SERDES_PLL_FBDIV2
);
2432 nw64(ctrl_reg
, ctrl_val
);
2433 nw64(test_cfg_reg
, test_cfg_val
);
2435 /* Initialize all 4 lanes of the SERDES. */
2436 for (i
= 0; i
< 4; i
++) {
2437 u32 rxtx_ctrl
, glue0
;
2440 err
= esr_read_rxtx_ctrl(np
, i
, &rxtx_ctrl
);
2443 err
= esr_read_glue0(np
, i
, &glue0
);
2447 rxtx_ctrl
&= ~(ESR_RXTX_CTRL_VMUXLO
);
2448 rxtx_ctrl
|= (ESR_RXTX_CTRL_ENSTRETCH
|
2449 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT
));
2451 glue0
&= ~(ESR_GLUE_CTRL0_SRATE
|
2452 ESR_GLUE_CTRL0_THCNT
|
2453 ESR_GLUE_CTRL0_BLTIME
);
2454 glue0
|= (ESR_GLUE_CTRL0_RXLOSENAB
|
2455 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT
) |
2456 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT
) |
2457 (BLTIME_300_CYCLES
<<
2458 ESR_GLUE_CTRL0_BLTIME_SHIFT
));
2460 err
= esr_write_rxtx_ctrl(np
, i
, rxtx_ctrl
);
2463 err
= esr_write_glue0(np
, i
, glue0
);
2469 sig
= nr64(ESR_INT_SIGNALS
);
2472 mask
= ESR_INT_SIGNALS_P0_BITS
;
2473 val
= (ESR_INT_SRDY0_P0
|
2476 ESR_INT_XDP_P0_CH3
|
2477 ESR_INT_XDP_P0_CH2
|
2478 ESR_INT_XDP_P0_CH1
|
2479 ESR_INT_XDP_P0_CH0
);
2483 mask
= ESR_INT_SIGNALS_P1_BITS
;
2484 val
= (ESR_INT_SRDY0_P1
|
2487 ESR_INT_XDP_P1_CH3
|
2488 ESR_INT_XDP_P1_CH2
|
2489 ESR_INT_XDP_P1_CH1
|
2490 ESR_INT_XDP_P1_CH0
);
2497 if ((sig
& mask
) != val
) {
2499 err
= serdes_init_1g_serdes(np
);
2501 np
->flags
&= ~NIU_FLAGS_10G
;
2502 np
->mac_xcvr
= MAC_XCVR_PCS
;
2504 netdev_err(np
->dev
, "Port %u 10G/1G SERDES Link Failed\n",
2513 static int niu_determine_phy_disposition(struct niu
*np
)
2515 struct niu_parent
*parent
= np
->parent
;
2516 u8 plat_type
= parent
->plat_type
;
2517 const struct niu_phy_template
*tp
;
2518 u32 phy_addr_off
= 0;
2520 if (plat_type
== PLAT_TYPE_NIU
) {
2524 NIU_FLAGS_XCVR_SERDES
)) {
2525 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
2527 tp
= &phy_template_niu_10g_serdes
;
2529 case NIU_FLAGS_XCVR_SERDES
:
2531 tp
= &phy_template_niu_1g_serdes
;
2533 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
2536 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) {
2537 tp
= &phy_template_niu_10g_hotplug
;
2543 tp
= &phy_template_niu_10g_fiber
;
2544 phy_addr_off
+= np
->port
;
2552 NIU_FLAGS_XCVR_SERDES
)) {
2555 tp
= &phy_template_1g_copper
;
2556 if (plat_type
== PLAT_TYPE_VF_P0
)
2558 else if (plat_type
== PLAT_TYPE_VF_P1
)
2561 phy_addr_off
+= (np
->port
^ 0x3);
2566 tp
= &phy_template_10g_copper
;
2569 case NIU_FLAGS_FIBER
:
2571 tp
= &phy_template_1g_fiber
;
2574 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
2576 tp
= &phy_template_10g_fiber
;
2577 if (plat_type
== PLAT_TYPE_VF_P0
||
2578 plat_type
== PLAT_TYPE_VF_P1
)
2580 phy_addr_off
+= np
->port
;
2581 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) {
2582 tp
= &phy_template_10g_fiber_hotplug
;
2590 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
2591 case NIU_FLAGS_XCVR_SERDES
| NIU_FLAGS_FIBER
:
2592 case NIU_FLAGS_XCVR_SERDES
:
2596 tp
= &phy_template_10g_serdes
;
2600 tp
= &phy_template_1g_rgmii
;
2606 phy_addr_off
= niu_atca_port_num
[np
->port
];
2614 np
->phy_ops
= tp
->ops
;
2615 np
->phy_addr
= tp
->phy_addr_base
+ phy_addr_off
;
2620 static int niu_init_link(struct niu
*np
)
2622 struct niu_parent
*parent
= np
->parent
;
2625 if (parent
->plat_type
== PLAT_TYPE_NIU
) {
2626 err
= niu_xcvr_init(np
);
2631 err
= niu_serdes_init(np
);
2632 if (err
&& !(np
->flags
& NIU_FLAGS_HOTPLUG_PHY
))
2635 err
= niu_xcvr_init(np
);
2636 if (!err
|| (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
))
2637 niu_link_status(np
, &ignore
);
2641 static void niu_set_primary_mac(struct niu
*np
, unsigned char *addr
)
2643 u16 reg0
= addr
[4] << 8 | addr
[5];
2644 u16 reg1
= addr
[2] << 8 | addr
[3];
2645 u16 reg2
= addr
[0] << 8 | addr
[1];
2647 if (np
->flags
& NIU_FLAGS_XMAC
) {
2648 nw64_mac(XMAC_ADDR0
, reg0
);
2649 nw64_mac(XMAC_ADDR1
, reg1
);
2650 nw64_mac(XMAC_ADDR2
, reg2
);
2652 nw64_mac(BMAC_ADDR0
, reg0
);
2653 nw64_mac(BMAC_ADDR1
, reg1
);
2654 nw64_mac(BMAC_ADDR2
, reg2
);
2658 static int niu_num_alt_addr(struct niu
*np
)
2660 if (np
->flags
& NIU_FLAGS_XMAC
)
2661 return XMAC_NUM_ALT_ADDR
;
2663 return BMAC_NUM_ALT_ADDR
;
2666 static int niu_set_alt_mac(struct niu
*np
, int index
, unsigned char *addr
)
2668 u16 reg0
= addr
[4] << 8 | addr
[5];
2669 u16 reg1
= addr
[2] << 8 | addr
[3];
2670 u16 reg2
= addr
[0] << 8 | addr
[1];
2672 if (index
>= niu_num_alt_addr(np
))
2675 if (np
->flags
& NIU_FLAGS_XMAC
) {
2676 nw64_mac(XMAC_ALT_ADDR0(index
), reg0
);
2677 nw64_mac(XMAC_ALT_ADDR1(index
), reg1
);
2678 nw64_mac(XMAC_ALT_ADDR2(index
), reg2
);
2680 nw64_mac(BMAC_ALT_ADDR0(index
), reg0
);
2681 nw64_mac(BMAC_ALT_ADDR1(index
), reg1
);
2682 nw64_mac(BMAC_ALT_ADDR2(index
), reg2
);
2688 static int niu_enable_alt_mac(struct niu
*np
, int index
, int on
)
2693 if (index
>= niu_num_alt_addr(np
))
2696 if (np
->flags
& NIU_FLAGS_XMAC
) {
2697 reg
= XMAC_ADDR_CMPEN
;
2700 reg
= BMAC_ADDR_CMPEN
;
2701 mask
= 1 << (index
+ 1);
2704 val
= nr64_mac(reg
);
2714 static void __set_rdc_table_num_hw(struct niu
*np
, unsigned long reg
,
2715 int num
, int mac_pref
)
2717 u64 val
= nr64_mac(reg
);
2718 val
&= ~(HOST_INFO_MACRDCTBLN
| HOST_INFO_MPR
);
2721 val
|= HOST_INFO_MPR
;
2725 static int __set_rdc_table_num(struct niu
*np
,
2726 int xmac_index
, int bmac_index
,
2727 int rdc_table_num
, int mac_pref
)
2731 if (rdc_table_num
& ~HOST_INFO_MACRDCTBLN
)
2733 if (np
->flags
& NIU_FLAGS_XMAC
)
2734 reg
= XMAC_HOST_INFO(xmac_index
);
2736 reg
= BMAC_HOST_INFO(bmac_index
);
2737 __set_rdc_table_num_hw(np
, reg
, rdc_table_num
, mac_pref
);
2741 static int niu_set_primary_mac_rdc_table(struct niu
*np
, int table_num
,
2744 return __set_rdc_table_num(np
, 17, 0, table_num
, mac_pref
);
2747 static int niu_set_multicast_mac_rdc_table(struct niu
*np
, int table_num
,
2750 return __set_rdc_table_num(np
, 16, 8, table_num
, mac_pref
);
2753 static int niu_set_alt_mac_rdc_table(struct niu
*np
, int idx
,
2754 int table_num
, int mac_pref
)
2756 if (idx
>= niu_num_alt_addr(np
))
2758 return __set_rdc_table_num(np
, idx
, idx
+ 1, table_num
, mac_pref
);
2761 static u64
vlan_entry_set_parity(u64 reg_val
)
2766 port01_mask
= 0x00ff;
2767 port23_mask
= 0xff00;
2769 if (hweight64(reg_val
& port01_mask
) & 1)
2770 reg_val
|= ENET_VLAN_TBL_PARITY0
;
2772 reg_val
&= ~ENET_VLAN_TBL_PARITY0
;
2774 if (hweight64(reg_val
& port23_mask
) & 1)
2775 reg_val
|= ENET_VLAN_TBL_PARITY1
;
2777 reg_val
&= ~ENET_VLAN_TBL_PARITY1
;
2782 static void vlan_tbl_write(struct niu
*np
, unsigned long index
,
2783 int port
, int vpr
, int rdc_table
)
2785 u64 reg_val
= nr64(ENET_VLAN_TBL(index
));
2787 reg_val
&= ~((ENET_VLAN_TBL_VPR
|
2788 ENET_VLAN_TBL_VLANRDCTBLN
) <<
2789 ENET_VLAN_TBL_SHIFT(port
));
2791 reg_val
|= (ENET_VLAN_TBL_VPR
<<
2792 ENET_VLAN_TBL_SHIFT(port
));
2793 reg_val
|= (rdc_table
<< ENET_VLAN_TBL_SHIFT(port
));
2795 reg_val
= vlan_entry_set_parity(reg_val
);
2797 nw64(ENET_VLAN_TBL(index
), reg_val
);
2800 static void vlan_tbl_clear(struct niu
*np
)
2804 for (i
= 0; i
< ENET_VLAN_TBL_NUM_ENTRIES
; i
++)
2805 nw64(ENET_VLAN_TBL(i
), 0);
2808 static int tcam_wait_bit(struct niu
*np
, u64 bit
)
2812 while (--limit
> 0) {
2813 if (nr64(TCAM_CTL
) & bit
)
2823 static int tcam_flush(struct niu
*np
, int index
)
2825 nw64(TCAM_KEY_0
, 0x00);
2826 nw64(TCAM_KEY_MASK_0
, 0xff);
2827 nw64(TCAM_CTL
, (TCAM_CTL_RWC_TCAM_WRITE
| index
));
2829 return tcam_wait_bit(np
, TCAM_CTL_STAT
);
2833 static int tcam_read(struct niu
*np
, int index
,
2834 u64
*key
, u64
*mask
)
2838 nw64(TCAM_CTL
, (TCAM_CTL_RWC_TCAM_READ
| index
));
2839 err
= tcam_wait_bit(np
, TCAM_CTL_STAT
);
2841 key
[0] = nr64(TCAM_KEY_0
);
2842 key
[1] = nr64(TCAM_KEY_1
);
2843 key
[2] = nr64(TCAM_KEY_2
);
2844 key
[3] = nr64(TCAM_KEY_3
);
2845 mask
[0] = nr64(TCAM_KEY_MASK_0
);
2846 mask
[1] = nr64(TCAM_KEY_MASK_1
);
2847 mask
[2] = nr64(TCAM_KEY_MASK_2
);
2848 mask
[3] = nr64(TCAM_KEY_MASK_3
);
static int tcam_write(struct niu *np, int index,
		      u64 *key, u64 *mask)
{
	nw64(TCAM_KEY_0, key[0]);
	nw64(TCAM_KEY_1, key[1]);
	nw64(TCAM_KEY_2, key[2]);
	nw64(TCAM_KEY_3, key[3]);
	nw64(TCAM_KEY_MASK_0, mask[0]);
	nw64(TCAM_KEY_MASK_1, mask[1]);
	nw64(TCAM_KEY_MASK_2, mask[2]);
	nw64(TCAM_KEY_MASK_3, mask[3]);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
static int tcam_assoc_read(struct niu *np, int index, u64 *data)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err)
		*data = nr64(TCAM_KEY_1);

	return err;
}

static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
{
	nw64(TCAM_KEY_1, assoc_data);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
static void tcam_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val &= ~FFLP_CFG_1_TCAM_DIS;
	else
		val |= FFLP_CFG_1_TCAM_DIS;
	nw64(FFLP_CFG_1, val);
}

static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~(FFLP_CFG_1_FFLPINITDONE |
		 FFLP_CFG_1_CAMRATIO);
	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}
static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
				      int on)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_ETHERTYPE1 ||
	    class > CLASS_CODE_ETHERTYPE2)
		return -EINVAL;

	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
	val = nr64(reg);
	if (on)
		val |= L2_CLS_VLD;
	else
		val &= ~L2_CLS_VLD;
	nw64(reg, val);

	return 0;
}

static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
				   u64 ether_type)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_ETHERTYPE1 ||
	    class > CLASS_CODE_ETHERTYPE2 ||
	    (ether_type & ~(u64)0xffff) != 0)
		return -EINVAL;

	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
	val = nr64(reg);
	val &= ~L2_CLS_ETYPE;
	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
	nw64(reg, val);

	return 0;
}
static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
				     int on)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_USER_PROG1 ||
	    class > CLASS_CODE_USER_PROG4)
		return -EINVAL;

	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
	val = nr64(reg);
	if (on)
		val |= L3_CLS_VALID;
	else
		val &= ~L3_CLS_VALID;
	nw64(reg, val);

	return 0;
}

static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
				  int ipv6, u64 protocol_id,
				  u64 tos_mask, u64 tos_val)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_USER_PROG1 ||
	    class > CLASS_CODE_USER_PROG4 ||
	    (protocol_id & ~(u64)0xff) != 0 ||
	    (tos_mask & ~(u64)0xff) != 0 ||
	    (tos_val & ~(u64)0xff) != 0)
		return -EINVAL;

	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
	val = nr64(reg);
	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
		 L3_CLS_TOSMASK | L3_CLS_TOS);
	if (ipv6)
		val |= L3_CLS_IPVER;
	val |= (protocol_id << L3_CLS_PID_SHIFT);
	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
	val |= (tos_val << L3_CLS_TOS_SHIFT);
	nw64(reg, val);

	return 0;
}
static int tcam_early_init(struct niu *np)
{
	unsigned long i;
	int err;

	tcam_set_lat_and_ratio(np,
			       DEFAULT_TCAM_LATENCY,
			       DEFAULT_TCAM_ACCESS_RATIO);
	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
		err = tcam_user_eth_class_enable(np, i, 0);
		if (err)
			return err;
	}
	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
		err = tcam_user_ip_class_enable(np, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int tcam_flush_all(struct niu *np)
{
	unsigned long i;

	for (i = 0; i < np->parent->tcam_num_entries; i++) {
		int err = tcam_flush(np, i);
		if (err)
			return err;
	}
	return 0;
}
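
/* FCRAM (flow) hash table helpers.  hash_addr_regval() builds the
 * HASH_TBL_ADDR value for a given starting index, and hash_read() /
 * hash_write() then stream num_entries 64-bit words through the
 * per-partition HASH_TBL_DATA register.
 */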
static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
{
	return ((u64)index | (num_entries == 1 ?
			      HASH_TBL_ADDR_AUTOINC : 0));
}

static int hash_read(struct niu *np, unsigned long partition,
		     unsigned long index, unsigned long num_entries,
		     u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + num_entries > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		data[i] = nr64(HASH_TBL_DATA(partition));

	return 0;
}

static int hash_write(struct niu *np, unsigned long partition,
		      unsigned long index, unsigned long num_entries,
		      u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + (num_entries * 8) > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		nw64(HASH_TBL_DATA(partition), data[i]);

	return 0;
}
static void fflp_reset(struct niu *np)
{
	u64 val;

	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
	udelay(10);
	nw64(FFLP_CFG_1, 0);

	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}

static void fflp_set_timings(struct niu *np)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~FFLP_CFG_1_FFLPINITDONE;
	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);

	val = nr64(FCRAM_REF_TMR);
	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
	nw64(FCRAM_REF_TMR, val);
}
static int fflp_set_partition(struct niu *np, u64 partition,
			      u64 mask, u64 base, int enable)
{
	unsigned long reg;
	u64 val;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    (mask & ~(u64)0x1f) != 0 ||
	    (base & ~(u64)0x1f) != 0)
		return -EINVAL;

	reg = FLW_PRT_SEL(partition);
	val = nr64(reg);
	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
	if (enable)
		val |= FLW_PRT_SEL_EXT;
	nw64(reg, val);

	return 0;
}

static int fflp_disable_all_partitions(struct niu *np)
{
	unsigned long i;

	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
		int err = fflp_set_partition(np, 0, 0, 0, 0);
		if (err)
			return err;
	}
	return 0;
}
static void fflp_llcsnap_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val |= FFLP_CFG_1_LLCSNAP;
	else
		val &= ~FFLP_CFG_1_LLCSNAP;
	nw64(FFLP_CFG_1, val);
}

static void fflp_errors_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val &= ~FFLP_CFG_1_ERRORDIS;
	else
		val |= FFLP_CFG_1_ERRORDIS;
	nw64(FFLP_CFG_1, val);
}

static int fflp_hash_clear(struct niu *np)
{
	struct fcram_hash_ipv4 ent;
	unsigned long i;

	/* IPV4 hash entry with valid bit clear, rest is don't care. */
	memset(&ent, 0, sizeof(ent));
	ent.header = HASH_HEADER_EXT;

	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
		if (err)
			return err;
	}
	return 0;
}
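
/* fflp_early_init() below runs once per parent device: the
 * PARENT_FLGS_CLS_HWINIT flag, tested and set under the parent lock,
 * ensures the TCAM and (on non-NIU platforms) the FCRAM hash table are
 * only initialized by the first port that comes up.
 */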
static int fflp_early_init(struct niu *np)
{
	struct niu_parent *parent;
	unsigned long flags;
	int err;

	niu_lock_parent(np, flags);

	parent = np->parent;
	err = 0;
	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			fflp_reset(np);
			fflp_set_timings(np);
			err = fflp_disable_all_partitions(np);
			if (err) {
				netif_printk(np, probe, KERN_DEBUG, np->dev,
					     "fflp_disable_all_partitions failed, err=%d\n",
					     err);
				goto out;
			}
		}

		err = tcam_early_init(np);
		if (err) {
			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "tcam_early_init failed, err=%d\n", err);
			goto out;
		}
		fflp_llcsnap_enable(np, 1);
		fflp_errors_enable(np, 0);

		err = tcam_flush_all(np);
		if (err) {
			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "tcam_flush_all failed, err=%d\n", err);
			goto out;
		}
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = fflp_hash_clear(np);
			if (err) {
				netif_printk(np, probe, KERN_DEBUG, np->dev,
					     "fflp_hash_clear failed, err=%d\n",
					     err);
				goto out;
			}
		}

		vlan_tbl_clear(np);

		parent->flags |= PARENT_FLGS_CLS_HWINIT;
	}
out:
	niu_unlock_parent(np, flags);
	return err;
}
static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
	return 0;
}

static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
	return 0;
}
/* Entries for the ports are interleaved in the TCAM */
static u16 tcam_get_index(struct niu *np, u16 idx)
{
	/* One entry reserved for IP fragment rule */
	if (idx >= (np->clas.tcam_sz - 1))
		idx = 0;
	return (np->clas.tcam_top + ((idx+1) * np->parent->num_ports));
}

static u16 tcam_get_size(struct niu *np)
{
	/* One entry reserved for IP fragment rule */
	return np->clas.tcam_sz - 1;
}

static u16 tcam_get_valid_entry_cnt(struct niu *np)
{
	/* One entry reserved for IP fragment rule */
	return np->clas.tcam_valid_entries - 1;
}
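
/* Illustration (values hypothetical): with num_ports == 2, user entry
 * idx 0 maps to tcam_top + (0 + 1) * 2 = tcam_top + 2 and idx 1 to
 * tcam_top + 4, keeping the per-port entries interleaved; the entry at
 * tcam_top itself is used for the IP fragment rule (see
 * niu_set_ip_frag_rule()).
 */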
static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
			      u32 offset, u32 size)
{
	int i = skb_shinfo(skb)->nr_frags;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page = page;
	frag->page_offset = offset;
	frag->size = size;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;

	skb_shinfo(skb)->nr_frags = i + 1;
}
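
/* RX pages are tracked in a small hash table keyed by the page's DMA
 * address: page->index stores the base DMA address and page->mapping is
 * reused as the singly-linked chain pointer, so no extra per-page
 * bookkeeping structure is needed.
 */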
static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
{
	a >>= PAGE_SHIFT;
	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));

	return (a & (MAX_RBR_RING_SIZE - 1));
}

static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
				    struct page ***link)
{
	unsigned int h = niu_hash_rxaddr(rp, addr);
	struct page *p, **pp;

	addr &= PAGE_MASK;
	pp = &rp->rxhash[h];
	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
		if (p->index == addr) {
			*link = pp;
			break;
		}
	}

	return p;
}
static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
	unsigned int h = niu_hash_rxaddr(rp, base);

	page->index = base;
	page->mapping = (struct address_space *) rp->rxhash[h];
	rp->rxhash[h] = page;
}
static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
			    gfp_t mask, int start_index)
{
	struct page *page;
	u64 addr;
	int i;

	page = alloc_page(mask);
	if (!page)
		return -ENOMEM;

	addr = np->ops->map_page(np->device, page, 0,
				 PAGE_SIZE, DMA_FROM_DEVICE);

	niu_hash_page(rp, page, addr);
	if (rp->rbr_blocks_per_page > 1)
		atomic_add(rp->rbr_blocks_per_page - 1,
			   &compound_head(page)->_count);

	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
		__le32 *rbr = &rp->rbr[start_index + i];

		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
		addr += rp->rbr_block_size;
	}

	return 0;
}
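
/* Note: each page allocated above is carved into rp->rbr_blocks_per_page
 * RBR entries; the page reference count is bumped by blocks_per_page - 1
 * so every RBR block effectively holds its own reference to the page.
 */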
static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int index = rp->rbr_index;

	rp->rbr_pending++;
	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
		int err = niu_rbr_add_page(np, rp, mask, index);

		if (unlikely(err)) {
			rp->rbr_pending--;
			return;
		}

		rp->rbr_index += rp->rbr_blocks_per_page;
		BUG_ON(rp->rbr_index > rp->rbr_table_size);
		if (rp->rbr_index == rp->rbr_table_size)
			rp->rbr_index = 0;

		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
			rp->rbr_pending = 0;
		}
	}
}
static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
{
	unsigned int index = rp->rcr_index;
	int num_rcr = 0;

	while (1) {
		struct page *page, **link;
		u64 addr, val;
		u32 rcr_size;

		num_rcr++;

		val = le64_to_cpup(&rp->rcr[index]);
		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
		page = niu_find_rxpage(rp, addr, &link);

		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
					 RCR_ENTRY_PKTBUFSZ_SHIFT];
		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
			*link = (struct page *) page->mapping;
			np->ops->unmap_page(np->device, page->index,
					    PAGE_SIZE, DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;
			__free_page(page);
			rp->rbr_refill_pending++;
		}

		index = NEXT_RCR(rp, index);
		if (!(val & RCR_ENTRY_MULTI))
			break;
	}
	rp->rcr_index = index;

	return num_rcr;
}
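
/* Receive one packet: an RCR entry is consumed per buffer fragment, each
 * fragment is appended to the skb as a page frag, and the RCR_ENTRY_MULTI
 * bit tells us whether more fragments of the same packet follow.
 */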
static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
			      struct rx_ring_info *rp)
{
	unsigned int index = rp->rcr_index;
	struct sk_buff *skb;
	int len, num_rcr;

	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb))
		return niu_rx_pkt_ignore(np, rp);

	num_rcr = 0;
	while (1) {
		struct page *page, **link;
		u32 rcr_size, append_size;
		u64 addr, val, off;

		num_rcr++;

		val = le64_to_cpup(&rp->rcr[index]);

		len = (val & RCR_ENTRY_L2_LEN) >>
			RCR_ENTRY_L2_LEN_SHIFT;

		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
		page = niu_find_rxpage(rp, addr, &link);

		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
					 RCR_ENTRY_PKTBUFSZ_SHIFT];

		off = addr & ~PAGE_MASK;
		append_size = rcr_size;
		if (num_rcr == 1) {
			int ptype;

			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
			if ((ptype == RCR_PKT_TYPE_TCP ||
			     ptype == RCR_PKT_TYPE_UDP) &&
			    !(val & (RCR_ENTRY_NOPORT |
				     RCR_ENTRY_ERROR)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (!(val & RCR_ENTRY_MULTI))
			append_size = len - skb->len;

		niu_rx_skb_append(skb, page, off, append_size);
		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
			*link = (struct page *) page->mapping;
			np->ops->unmap_page(np->device, page->index,
					    PAGE_SIZE, DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;
			rp->rbr_refill_pending++;
		}

		index = NEXT_RCR(rp, index);
		if (!(val & RCR_ENTRY_MULTI))
			break;
	}
	rp->rcr_index = index;

	skb_reserve(skb, NET_IP_ALIGN);
	__pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));

	rp->rx_packets++;
	rp->rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, np->dev);
	skb_record_rx_queue(skb, rp->rx_channel);
	napi_gro_receive(napi, skb);

	return num_rcr;
}
static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int blocks_per_page = rp->rbr_blocks_per_page;
	int err, index = rp->rbr_index;

	err = 0;
	while (index < (rp->rbr_table_size - blocks_per_page)) {
		err = niu_rbr_add_page(np, rp, mask, index);
		if (err)
			break;

		index += blocks_per_page;
	}

	rp->rbr_index = index;
	return err;
}
static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
{
	int i;

	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
		struct page *page;

		page = rp->rxhash[i];
		while (page) {
			struct page *next = (struct page *) page->mapping;
			u64 base = page->index;

			np->ops->unmap_page(np->device, base, PAGE_SIZE,
					    DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;

			__free_page(page);

			page = next;
		}
	}

	for (i = 0; i < rp->rbr_table_size; i++)
		rp->rbr[i] = cpu_to_le32(0);
	rp->rbr_index = 0;
}
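
/* TX completion: release_tx_packet() below unmaps the head and all page
 * fragments of one transmitted skb, frees it, and returns the descriptor
 * index following the packet.
 */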
static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
{
	struct tx_buff_info *tb = &rp->tx_buffs[idx];
	struct sk_buff *skb = tb->skb;
	struct tx_pkt_hdr *tp;
	u64 tx_flags;
	int i, len;

	tp = (struct tx_pkt_hdr *) skb->data;
	tx_flags = le64_to_cpup(&tp->flags);

	rp->tx_packets++;
	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
			 ((tx_flags & TXHDR_PAD) / 2));

	len = skb_headlen(skb);
	np->ops->unmap_single(np->device, tb->mapping,
			      len, DMA_TO_DEVICE);

	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
		rp->mark_pending--;

	tb->skb = NULL;
	do {
		idx = NEXT_TX(rp, idx);
		len -= MAX_TX_DESC_LEN;
	} while (len > 0);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		tb = &rp->tx_buffs[idx];
		BUG_ON(tb->skb != NULL);
		np->ops->unmap_page(np->device, tb->mapping,
				    skb_shinfo(skb)->frags[i].size,
				    DMA_TO_DEVICE);
		idx = NEXT_TX(rp, idx);
	}

	dev_kfree_skb(skb);

	return idx;
}
3613 #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
	struct netdev_queue *txq;
	u16 pkt_cnt, tmp;
	int cons, index;
	u64 cs;

	index = (rp - np->tx_rings);
	txq = netdev_get_tx_queue(np->dev, index);

	cs = rp->tx_cs;
	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
		goto out;

	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);

	rp->last_pkt_cnt = tmp;

	cons = rp->cons;

	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);

	while (pkt_cnt--)
		cons = release_tx_packet(np, rp, cons);

	rp->cons = cons;
	smp_mb();

out:
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
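
/* The queue is only re-awakened once the number of free TX descriptors
 * exceeds NIU_TX_WAKEUP_THRESH, i.e. a quarter of the ring, and the check
 * is repeated under the tx lock to avoid racing with the xmit path.
 */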
static inline void niu_sync_rx_discard_stats(struct niu *np,
					     struct rx_ring_info *rp,
					     const int limit)
{
	/* This elaborate scheme is needed for reading the RX discard
	 * counters, as they are only 16-bit and can overflow quickly,
	 * and because the overflow indication bit is not usable as
	 * the counter value does not wrap, but remains at max value
	 * 0xFFFF.
	 *
	 * In theory and in practice counters can be lost in between
	 * reading nr64() and clearing the counter nw64(). For this
	 * reason, the number of counter clearings nw64() is
	 * limited/reduced through the limit parameter.
	 */
	int rx_channel = rp->rx_channel;
	u32 misc, wred;

	/* RXMISC (Receive Miscellaneous Discard Count), covers the
	 * following discard events: IPP (Input Port Process),
	 * FFLP/TCAM, Full RCR (Receive Completion Ring), RBR (Receive
	 * Block Ring) prefetch buffer is empty.
	 */
3680 misc
= nr64(RXMISC(rx_channel
));
3681 if (unlikely((misc
& RXMISC_COUNT
) > limit
)) {
3682 nw64(RXMISC(rx_channel
), 0);
3683 rp
->rx_errors
+= misc
& RXMISC_COUNT
;
3685 if (unlikely(misc
& RXMISC_OFLOW
))
3686 dev_err(np
->device
, "rx-%d: Counter overflow RXMISC discard\n",
3689 netif_printk(np
, rx_err
, KERN_DEBUG
, np
->dev
,
3690 "rx-%d: MISC drop=%u over=%u\n",
3691 rx_channel
, misc
, misc
-limit
);
3694 /* WRED (Weighted Random Early Discard) by hardware */
3695 wred
= nr64(RED_DIS_CNT(rx_channel
));
3696 if (unlikely((wred
& RED_DIS_CNT_COUNT
) > limit
)) {
3697 nw64(RED_DIS_CNT(rx_channel
), 0);
3698 rp
->rx_dropped
+= wred
& RED_DIS_CNT_COUNT
;
3700 if (unlikely(wred
& RED_DIS_CNT_OFLOW
))
3701 dev_err(np
->device
, "rx-%d: Counter overflow WRED discard\n", rx_channel
);
3703 netif_printk(np
, rx_err
, KERN_DEBUG
, np
->dev
,
3704 "rx-%d: WRED drop=%u over=%u\n",
3705 rx_channel
, wred
, wred
-limit
);
3709 static int niu_rx_work(struct napi_struct
*napi
, struct niu
*np
,
3710 struct rx_ring_info
*rp
, int budget
)
3712 int qlen
, rcr_done
= 0, work_done
= 0;
3713 struct rxdma_mailbox
*mbox
= rp
->mbox
;
3717 stat
= nr64(RX_DMA_CTL_STAT(rp
->rx_channel
));
3718 qlen
= nr64(RCRSTAT_A(rp
->rx_channel
)) & RCRSTAT_A_QLEN
;
3720 stat
= le64_to_cpup(&mbox
->rx_dma_ctl_stat
);
3721 qlen
= (le64_to_cpup(&mbox
->rcrstat_a
) & RCRSTAT_A_QLEN
);
3723 mbox
->rx_dma_ctl_stat
= 0;
3724 mbox
->rcrstat_a
= 0;
3726 netif_printk(np
, rx_status
, KERN_DEBUG
, np
->dev
,
3727 "%s(chan[%d]), stat[%llx] qlen=%d\n",
3728 __func__
, rp
->rx_channel
, (unsigned long long)stat
, qlen
);
3730 rcr_done
= work_done
= 0;
3731 qlen
= min(qlen
, budget
);
3732 while (work_done
< qlen
) {
3733 rcr_done
+= niu_process_rx_pkt(napi
, np
, rp
);
3737 if (rp
->rbr_refill_pending
>= rp
->rbr_kick_thresh
) {
3740 for (i
= 0; i
< rp
->rbr_refill_pending
; i
++)
3741 niu_rbr_refill(np
, rp
, GFP_ATOMIC
);
3742 rp
->rbr_refill_pending
= 0;
3745 stat
= (RX_DMA_CTL_STAT_MEX
|
3746 ((u64
)work_done
<< RX_DMA_CTL_STAT_PKTREAD_SHIFT
) |
3747 ((u64
)rcr_done
<< RX_DMA_CTL_STAT_PTRREAD_SHIFT
));
3749 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
), stat
);
3751 /* Only sync discards stats when qlen indicate potential for drops */
3753 niu_sync_rx_discard_stats(np
, rp
, 0x7FFF);
3758 static int niu_poll_core(struct niu
*np
, struct niu_ldg
*lp
, int budget
)
3761 u32 tx_vec
= (v0
>> 32);
3762 u32 rx_vec
= (v0
& 0xffffffff);
3763 int i
, work_done
= 0;
3765 netif_printk(np
, intr
, KERN_DEBUG
, np
->dev
,
3766 "%s() v0[%016llx]\n", __func__
, (unsigned long long)v0
);
3768 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
3769 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
3770 if (tx_vec
& (1 << rp
->tx_channel
))
3771 niu_tx_work(np
, rp
);
3772 nw64(LD_IM0(LDN_TXDMA(rp
->tx_channel
)), 0);
3775 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
3776 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
3778 if (rx_vec
& (1 << rp
->rx_channel
)) {
3781 this_work_done
= niu_rx_work(&lp
->napi
, np
, rp
,
3784 budget
-= this_work_done
;
3785 work_done
+= this_work_done
;
3787 nw64(LD_IM0(LDN_RXDMA(rp
->rx_channel
)), 0);
3793 static int niu_poll(struct napi_struct
*napi
, int budget
)
3795 struct niu_ldg
*lp
= container_of(napi
, struct niu_ldg
, napi
);
3796 struct niu
*np
= lp
->np
;
3799 work_done
= niu_poll_core(np
, lp
, budget
);
3801 if (work_done
< budget
) {
3802 napi_complete(napi
);
3803 niu_ldg_rearm(np
, lp
, 1);
3808 static void niu_log_rxchan_errors(struct niu
*np
, struct rx_ring_info
*rp
,
3811 netdev_err(np
->dev
, "RX channel %u errors ( ", rp
->rx_channel
);
3813 if (stat
& RX_DMA_CTL_STAT_RBR_TMOUT
)
3814 pr_cont("RBR_TMOUT ");
3815 if (stat
& RX_DMA_CTL_STAT_RSP_CNT_ERR
)
3816 pr_cont("RSP_CNT ");
3817 if (stat
& RX_DMA_CTL_STAT_BYTE_EN_BUS
)
3818 pr_cont("BYTE_EN_BUS ");
3819 if (stat
& RX_DMA_CTL_STAT_RSP_DAT_ERR
)
3820 pr_cont("RSP_DAT ");
3821 if (stat
& RX_DMA_CTL_STAT_RCR_ACK_ERR
)
3822 pr_cont("RCR_ACK ");
3823 if (stat
& RX_DMA_CTL_STAT_RCR_SHA_PAR
)
3824 pr_cont("RCR_SHA_PAR ");
3825 if (stat
& RX_DMA_CTL_STAT_RBR_PRE_PAR
)
3826 pr_cont("RBR_PRE_PAR ");
3827 if (stat
& RX_DMA_CTL_STAT_CONFIG_ERR
)
3829 if (stat
& RX_DMA_CTL_STAT_RCRINCON
)
3830 pr_cont("RCRINCON ");
3831 if (stat
& RX_DMA_CTL_STAT_RCRFULL
)
3832 pr_cont("RCRFULL ");
3833 if (stat
& RX_DMA_CTL_STAT_RBRFULL
)
3834 pr_cont("RBRFULL ");
3835 if (stat
& RX_DMA_CTL_STAT_RBRLOGPAGE
)
3836 pr_cont("RBRLOGPAGE ");
3837 if (stat
& RX_DMA_CTL_STAT_CFIGLOGPAGE
)
3838 pr_cont("CFIGLOGPAGE ");
3839 if (stat
& RX_DMA_CTL_STAT_DC_FIFO_ERR
)
3840 pr_cont("DC_FIDO ");
3845 static int niu_rx_error(struct niu
*np
, struct rx_ring_info
*rp
)
3847 u64 stat
= nr64(RX_DMA_CTL_STAT(rp
->rx_channel
));
3851 if (stat
& (RX_DMA_CTL_STAT_CHAN_FATAL
|
3852 RX_DMA_CTL_STAT_PORT_FATAL
))
3856 netdev_err(np
->dev
, "RX channel %u error, stat[%llx]\n",
3858 (unsigned long long) stat
);
3860 niu_log_rxchan_errors(np
, rp
, stat
);
3863 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
),
3864 stat
& RX_DMA_CTL_WRITE_CLEAR_ERRS
);
3869 static void niu_log_txchan_errors(struct niu
*np
, struct tx_ring_info
*rp
,
3872 netdev_err(np
->dev
, "TX channel %u errors ( ", rp
->tx_channel
);
3874 if (cs
& TX_CS_MBOX_ERR
)
3876 if (cs
& TX_CS_PKT_SIZE_ERR
)
3877 pr_cont("PKT_SIZE ");
3878 if (cs
& TX_CS_TX_RING_OFLOW
)
3879 pr_cont("TX_RING_OFLOW ");
3880 if (cs
& TX_CS_PREF_BUF_PAR_ERR
)
3881 pr_cont("PREF_BUF_PAR ");
3882 if (cs
& TX_CS_NACK_PREF
)
3883 pr_cont("NACK_PREF ");
3884 if (cs
& TX_CS_NACK_PKT_RD
)
3885 pr_cont("NACK_PKT_RD ");
3886 if (cs
& TX_CS_CONF_PART_ERR
)
3887 pr_cont("CONF_PART ");
3888 if (cs
& TX_CS_PKT_PRT_ERR
)
3889 pr_cont("PKT_PTR ");
3894 static int niu_tx_error(struct niu
*np
, struct tx_ring_info
*rp
)
3898 cs
= nr64(TX_CS(rp
->tx_channel
));
3899 logh
= nr64(TX_RNG_ERR_LOGH(rp
->tx_channel
));
3900 logl
= nr64(TX_RNG_ERR_LOGL(rp
->tx_channel
));
3902 netdev_err(np
->dev
, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
3904 (unsigned long long)cs
,
3905 (unsigned long long)logh
,
3906 (unsigned long long)logl
);
3908 niu_log_txchan_errors(np
, rp
, cs
);
3913 static int niu_mif_interrupt(struct niu
*np
)
3915 u64 mif_status
= nr64(MIF_STATUS
);
3918 if (np
->flags
& NIU_FLAGS_XMAC
) {
3919 u64 xrxmac_stat
= nr64_mac(XRXMAC_STATUS
);
3921 if (xrxmac_stat
& XRXMAC_STATUS_PHY_MDINT
)
3925 netdev_err(np
->dev
, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
3926 (unsigned long long)mif_status
, phy_mdint
);
3931 static void niu_xmac_interrupt(struct niu
*np
)
3933 struct niu_xmac_stats
*mp
= &np
->mac_stats
.xmac
;
3936 val
= nr64_mac(XTXMAC_STATUS
);
3937 if (val
& XTXMAC_STATUS_FRAME_CNT_EXP
)
3938 mp
->tx_frames
+= TXMAC_FRM_CNT_COUNT
;
3939 if (val
& XTXMAC_STATUS_BYTE_CNT_EXP
)
3940 mp
->tx_bytes
+= TXMAC_BYTE_CNT_COUNT
;
3941 if (val
& XTXMAC_STATUS_TXFIFO_XFR_ERR
)
3942 mp
->tx_fifo_errors
++;
3943 if (val
& XTXMAC_STATUS_TXMAC_OFLOW
)
3944 mp
->tx_overflow_errors
++;
3945 if (val
& XTXMAC_STATUS_MAX_PSIZE_ERR
)
3946 mp
->tx_max_pkt_size_errors
++;
3947 if (val
& XTXMAC_STATUS_TXMAC_UFLOW
)
3948 mp
->tx_underflow_errors
++;
3950 val
= nr64_mac(XRXMAC_STATUS
);
3951 if (val
& XRXMAC_STATUS_LCL_FLT_STATUS
)
3952 mp
->rx_local_faults
++;
3953 if (val
& XRXMAC_STATUS_RFLT_DET
)
3954 mp
->rx_remote_faults
++;
3955 if (val
& XRXMAC_STATUS_LFLT_CNT_EXP
)
3956 mp
->rx_link_faults
+= LINK_FAULT_CNT_COUNT
;
3957 if (val
& XRXMAC_STATUS_ALIGNERR_CNT_EXP
)
3958 mp
->rx_align_errors
+= RXMAC_ALIGN_ERR_CNT_COUNT
;
3959 if (val
& XRXMAC_STATUS_RXFRAG_CNT_EXP
)
3960 mp
->rx_frags
+= RXMAC_FRAG_CNT_COUNT
;
3961 if (val
& XRXMAC_STATUS_RXMULTF_CNT_EXP
)
3962 mp
->rx_mcasts
+= RXMAC_MC_FRM_CNT_COUNT
;
3963 if (val
& XRXMAC_STATUS_RXBCAST_CNT_EXP
)
3964 mp
->rx_bcasts
+= RXMAC_BC_FRM_CNT_COUNT
;
3965 if (val
& XRXMAC_STATUS_RXBCAST_CNT_EXP
)
3966 mp
->rx_bcasts
+= RXMAC_BC_FRM_CNT_COUNT
;
3967 if (val
& XRXMAC_STATUS_RXHIST1_CNT_EXP
)
3968 mp
->rx_hist_cnt1
+= RXMAC_HIST_CNT1_COUNT
;
3969 if (val
& XRXMAC_STATUS_RXHIST2_CNT_EXP
)
3970 mp
->rx_hist_cnt2
+= RXMAC_HIST_CNT2_COUNT
;
3971 if (val
& XRXMAC_STATUS_RXHIST3_CNT_EXP
)
3972 mp
->rx_hist_cnt3
+= RXMAC_HIST_CNT3_COUNT
;
3973 if (val
& XRXMAC_STATUS_RXHIST4_CNT_EXP
)
3974 mp
->rx_hist_cnt4
+= RXMAC_HIST_CNT4_COUNT
;
3975 if (val
& XRXMAC_STATUS_RXHIST5_CNT_EXP
)
3976 mp
->rx_hist_cnt5
+= RXMAC_HIST_CNT5_COUNT
;
3977 if (val
& XRXMAC_STATUS_RXHIST6_CNT_EXP
)
3978 mp
->rx_hist_cnt6
+= RXMAC_HIST_CNT6_COUNT
;
3979 if (val
& XRXMAC_STATUS_RXHIST7_CNT_EXP
)
3980 mp
->rx_hist_cnt7
+= RXMAC_HIST_CNT7_COUNT
;
3981 if (val
& XRXMAC_STATUS_RXOCTET_CNT_EXP
)
3982 mp
->rx_octets
+= RXMAC_BT_CNT_COUNT
;
3983 if (val
& XRXMAC_STATUS_CVIOLERR_CNT_EXP
)
3984 mp
->rx_code_violations
+= RXMAC_CD_VIO_CNT_COUNT
;
3985 if (val
& XRXMAC_STATUS_LENERR_CNT_EXP
)
3986 mp
->rx_len_errors
+= RXMAC_MPSZER_CNT_COUNT
;
3987 if (val
& XRXMAC_STATUS_CRCERR_CNT_EXP
)
3988 mp
->rx_crc_errors
+= RXMAC_CRC_ER_CNT_COUNT
;
3989 if (val
& XRXMAC_STATUS_RXUFLOW
)
3990 mp
->rx_underflows
++;
3991 if (val
& XRXMAC_STATUS_RXOFLOW
)
3994 val
= nr64_mac(XMAC_FC_STAT
);
3995 if (val
& XMAC_FC_STAT_TX_MAC_NPAUSE
)
3996 mp
->pause_off_state
++;
3997 if (val
& XMAC_FC_STAT_TX_MAC_PAUSE
)
3998 mp
->pause_on_state
++;
3999 if (val
& XMAC_FC_STAT_RX_MAC_RPAUSE
)
4000 mp
->pause_received
++;
4003 static void niu_bmac_interrupt(struct niu
*np
)
4005 struct niu_bmac_stats
*mp
= &np
->mac_stats
.bmac
;
4008 val
= nr64_mac(BTXMAC_STATUS
);
4009 if (val
& BTXMAC_STATUS_UNDERRUN
)
4010 mp
->tx_underflow_errors
++;
4011 if (val
& BTXMAC_STATUS_MAX_PKT_ERR
)
4012 mp
->tx_max_pkt_size_errors
++;
4013 if (val
& BTXMAC_STATUS_BYTE_CNT_EXP
)
4014 mp
->tx_bytes
+= BTXMAC_BYTE_CNT_COUNT
;
4015 if (val
& BTXMAC_STATUS_FRAME_CNT_EXP
)
4016 mp
->tx_frames
+= BTXMAC_FRM_CNT_COUNT
;
4018 val
= nr64_mac(BRXMAC_STATUS
);
4019 if (val
& BRXMAC_STATUS_OVERFLOW
)
4021 if (val
& BRXMAC_STATUS_FRAME_CNT_EXP
)
4022 mp
->rx_frames
+= BRXMAC_FRAME_CNT_COUNT
;
4023 if (val
& BRXMAC_STATUS_ALIGN_ERR_EXP
)
4024 mp
->rx_align_errors
+= BRXMAC_ALIGN_ERR_CNT_COUNT
;
4025 if (val
& BRXMAC_STATUS_CRC_ERR_EXP
)
4026 mp
->rx_crc_errors
+= BRXMAC_ALIGN_ERR_CNT_COUNT
;
4027 if (val
& BRXMAC_STATUS_LEN_ERR_EXP
)
4028 mp
->rx_len_errors
+= BRXMAC_CODE_VIOL_ERR_CNT_COUNT
;
4030 val
= nr64_mac(BMAC_CTRL_STATUS
);
4031 if (val
& BMAC_CTRL_STATUS_NOPAUSE
)
4032 mp
->pause_off_state
++;
4033 if (val
& BMAC_CTRL_STATUS_PAUSE
)
4034 mp
->pause_on_state
++;
4035 if (val
& BMAC_CTRL_STATUS_PAUSE_RECV
)
4036 mp
->pause_received
++;
4039 static int niu_mac_interrupt(struct niu
*np
)
4041 if (np
->flags
& NIU_FLAGS_XMAC
)
4042 niu_xmac_interrupt(np
);
4044 niu_bmac_interrupt(np
);
4049 static void niu_log_device_error(struct niu
*np
, u64 stat
)
4051 netdev_err(np
->dev
, "Core device errors ( ");
4053 if (stat
& SYS_ERR_MASK_META2
)
4055 if (stat
& SYS_ERR_MASK_META1
)
4057 if (stat
& SYS_ERR_MASK_PEU
)
4059 if (stat
& SYS_ERR_MASK_TXC
)
4061 if (stat
& SYS_ERR_MASK_RDMC
)
4063 if (stat
& SYS_ERR_MASK_TDMC
)
4065 if (stat
& SYS_ERR_MASK_ZCP
)
4067 if (stat
& SYS_ERR_MASK_FFLP
)
4069 if (stat
& SYS_ERR_MASK_IPP
)
4071 if (stat
& SYS_ERR_MASK_MAC
)
4073 if (stat
& SYS_ERR_MASK_SMX
)
4079 static int niu_device_error(struct niu
*np
)
4081 u64 stat
= nr64(SYS_ERR_STAT
);
4083 netdev_err(np
->dev
, "Core device error, stat[%llx]\n",
4084 (unsigned long long)stat
);
4086 niu_log_device_error(np
, stat
);
4091 static int niu_slowpath_interrupt(struct niu
*np
, struct niu_ldg
*lp
,
4092 u64 v0
, u64 v1
, u64 v2
)
4101 if (v1
& 0x00000000ffffffffULL
) {
4102 u32 rx_vec
= (v1
& 0xffffffff);
4104 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4105 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4107 if (rx_vec
& (1 << rp
->rx_channel
)) {
4108 int r
= niu_rx_error(np
, rp
);
4113 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
),
4114 RX_DMA_CTL_STAT_MEX
);
4119 if (v1
& 0x7fffffff00000000ULL
) {
4120 u32 tx_vec
= (v1
>> 32) & 0x7fffffff;
4122 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4123 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4125 if (tx_vec
& (1 << rp
->tx_channel
)) {
4126 int r
= niu_tx_error(np
, rp
);
4132 if ((v0
| v1
) & 0x8000000000000000ULL
) {
4133 int r
= niu_mif_interrupt(np
);
4139 int r
= niu_mac_interrupt(np
);
4144 int r
= niu_device_error(np
);
4151 niu_enable_interrupts(np
, 0);
4156 static void niu_rxchan_intr(struct niu
*np
, struct rx_ring_info
*rp
,
4159 struct rxdma_mailbox
*mbox
= rp
->mbox
;
4160 u64 stat_write
, stat
= le64_to_cpup(&mbox
->rx_dma_ctl_stat
);
4162 stat_write
= (RX_DMA_CTL_STAT_RCRTHRES
|
4163 RX_DMA_CTL_STAT_RCRTO
);
4164 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
), stat_write
);
4166 netif_printk(np
, intr
, KERN_DEBUG
, np
->dev
,
4167 "%s() stat[%llx]\n", __func__
, (unsigned long long)stat
);
4170 static void niu_txchan_intr(struct niu
*np
, struct tx_ring_info
*rp
,
4173 rp
->tx_cs
= nr64(TX_CS(rp
->tx_channel
));
4175 netif_printk(np
, intr
, KERN_DEBUG
, np
->dev
,
4176 "%s() cs[%llx]\n", __func__
, (unsigned long long)rp
->tx_cs
);
4179 static void __niu_fastpath_interrupt(struct niu
*np
, int ldg
, u64 v0
)
4181 struct niu_parent
*parent
= np
->parent
;
4185 tx_vec
= (v0
>> 32);
4186 rx_vec
= (v0
& 0xffffffff);
4188 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4189 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4190 int ldn
= LDN_RXDMA(rp
->rx_channel
);
4192 if (parent
->ldg_map
[ldn
] != ldg
)
4195 nw64(LD_IM0(ldn
), LD_IM0_MASK
);
4196 if (rx_vec
& (1 << rp
->rx_channel
))
4197 niu_rxchan_intr(np
, rp
, ldn
);
4200 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4201 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4202 int ldn
= LDN_TXDMA(rp
->tx_channel
);
4204 if (parent
->ldg_map
[ldn
] != ldg
)
4207 nw64(LD_IM0(ldn
), LD_IM0_MASK
);
4208 if (tx_vec
& (1 << rp
->tx_channel
))
4209 niu_txchan_intr(np
, rp
, ldn
);
4213 static void niu_schedule_napi(struct niu
*np
, struct niu_ldg
*lp
,
4214 u64 v0
, u64 v1
, u64 v2
)
4216 if (likely(napi_schedule_prep(&lp
->napi
))) {
4220 __niu_fastpath_interrupt(np
, lp
->ldg_num
, v0
);
4221 __napi_schedule(&lp
->napi
);
4225 static irqreturn_t
niu_interrupt(int irq
, void *dev_id
)
4227 struct niu_ldg
*lp
= dev_id
;
4228 struct niu
*np
= lp
->np
;
4229 int ldg
= lp
->ldg_num
;
4230 unsigned long flags
;
4233 if (netif_msg_intr(np
))
4234 printk(KERN_DEBUG KBUILD_MODNAME
": " "%s() ldg[%p](%d)",
4237 spin_lock_irqsave(&np
->lock
, flags
);
4239 v0
= nr64(LDSV0(ldg
));
4240 v1
= nr64(LDSV1(ldg
));
4241 v2
= nr64(LDSV2(ldg
));
4243 if (netif_msg_intr(np
))
4244 pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
4245 (unsigned long long) v0
,
4246 (unsigned long long) v1
,
4247 (unsigned long long) v2
);
4249 if (unlikely(!v0
&& !v1
&& !v2
)) {
4250 spin_unlock_irqrestore(&np
->lock
, flags
);
4254 if (unlikely((v0
& ((u64
)1 << LDN_MIF
)) || v1
|| v2
)) {
4255 int err
= niu_slowpath_interrupt(np
, lp
, v0
, v1
, v2
);
4259 if (likely(v0
& ~((u64
)1 << LDN_MIF
)))
4260 niu_schedule_napi(np
, lp
, v0
, v1
, v2
);
4262 niu_ldg_rearm(np
, lp
, 1);
4264 spin_unlock_irqrestore(&np
->lock
, flags
);
4269 static void niu_free_rx_ring_info(struct niu
*np
, struct rx_ring_info
*rp
)
4272 np
->ops
->free_coherent(np
->device
,
4273 sizeof(struct rxdma_mailbox
),
4274 rp
->mbox
, rp
->mbox_dma
);
4278 np
->ops
->free_coherent(np
->device
,
4279 MAX_RCR_RING_SIZE
* sizeof(__le64
),
4280 rp
->rcr
, rp
->rcr_dma
);
4282 rp
->rcr_table_size
= 0;
4286 niu_rbr_free(np
, rp
);
4288 np
->ops
->free_coherent(np
->device
,
4289 MAX_RBR_RING_SIZE
* sizeof(__le32
),
4290 rp
->rbr
, rp
->rbr_dma
);
4292 rp
->rbr_table_size
= 0;
4299 static void niu_free_tx_ring_info(struct niu
*np
, struct tx_ring_info
*rp
)
4302 np
->ops
->free_coherent(np
->device
,
4303 sizeof(struct txdma_mailbox
),
4304 rp
->mbox
, rp
->mbox_dma
);
4310 for (i
= 0; i
< MAX_TX_RING_SIZE
; i
++) {
4311 if (rp
->tx_buffs
[i
].skb
)
4312 (void) release_tx_packet(np
, rp
, i
);
4315 np
->ops
->free_coherent(np
->device
,
4316 MAX_TX_RING_SIZE
* sizeof(__le64
),
4317 rp
->descr
, rp
->descr_dma
);
4326 static void niu_free_channels(struct niu
*np
)
4331 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4332 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4334 niu_free_rx_ring_info(np
, rp
);
4336 kfree(np
->rx_rings
);
4337 np
->rx_rings
= NULL
;
4338 np
->num_rx_rings
= 0;
4342 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4343 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4345 niu_free_tx_ring_info(np
, rp
);
4347 kfree(np
->tx_rings
);
4348 np
->tx_rings
= NULL
;
4349 np
->num_tx_rings
= 0;
4353 static int niu_alloc_rx_ring_info(struct niu
*np
,
4354 struct rx_ring_info
*rp
)
4356 BUILD_BUG_ON(sizeof(struct rxdma_mailbox
) != 64);
4358 rp
->rxhash
= kzalloc(MAX_RBR_RING_SIZE
* sizeof(struct page
*),
4363 rp
->mbox
= np
->ops
->alloc_coherent(np
->device
,
4364 sizeof(struct rxdma_mailbox
),
4365 &rp
->mbox_dma
, GFP_KERNEL
);
4368 if ((unsigned long)rp
->mbox
& (64UL - 1)) {
4369 netdev_err(np
->dev
, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
4374 rp
->rcr
= np
->ops
->alloc_coherent(np
->device
,
4375 MAX_RCR_RING_SIZE
* sizeof(__le64
),
4376 &rp
->rcr_dma
, GFP_KERNEL
);
4379 if ((unsigned long)rp
->rcr
& (64UL - 1)) {
4380 netdev_err(np
->dev
, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
4384 rp
->rcr_table_size
= MAX_RCR_RING_SIZE
;
4387 rp
->rbr
= np
->ops
->alloc_coherent(np
->device
,
4388 MAX_RBR_RING_SIZE
* sizeof(__le32
),
4389 &rp
->rbr_dma
, GFP_KERNEL
);
4392 if ((unsigned long)rp
->rbr
& (64UL - 1)) {
4393 netdev_err(np
->dev
, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
4397 rp
->rbr_table_size
= MAX_RBR_RING_SIZE
;
4399 rp
->rbr_pending
= 0;
static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
{
	int mtu = np->dev->mtu;

	/* These values are recommended by the HW designers for fair
	 * utilization of DRR amongst the rings.
	 */
	rp->max_burst = mtu + 32;
	if (rp->max_burst > 4096)
		rp->max_burst = 4096;
}
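
/* For example, with the default 1500-byte MTU the DRR burst limit becomes
 * 1532 bytes; for jumbo MTUs it is clamped at 4096.
 */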
4416 static int niu_alloc_tx_ring_info(struct niu
*np
,
4417 struct tx_ring_info
*rp
)
4419 BUILD_BUG_ON(sizeof(struct txdma_mailbox
) != 64);
4421 rp
->mbox
= np
->ops
->alloc_coherent(np
->device
,
4422 sizeof(struct txdma_mailbox
),
4423 &rp
->mbox_dma
, GFP_KERNEL
);
4426 if ((unsigned long)rp
->mbox
& (64UL - 1)) {
4427 netdev_err(np
->dev
, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
4432 rp
->descr
= np
->ops
->alloc_coherent(np
->device
,
4433 MAX_TX_RING_SIZE
* sizeof(__le64
),
4434 &rp
->descr_dma
, GFP_KERNEL
);
4437 if ((unsigned long)rp
->descr
& (64UL - 1)) {
4438 netdev_err(np
->dev
, "Coherent alloc gives misaligned TXDMA descr table %p\n",
4443 rp
->pending
= MAX_TX_RING_SIZE
;
4448 /* XXX make these configurable... XXX */
4449 rp
->mark_freq
= rp
->pending
/ 4;
4451 niu_set_max_burst(np
, rp
);
static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
{
	int bss;

	bss = min(PAGE_SHIFT, 15);

	rp->rbr_block_size = 1 << bss;
	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);

	rp->rbr_sizes[0] = 256;
	rp->rbr_sizes[1] = 1024;
	if (np->dev->mtu > ETH_DATA_LEN) {
		switch (PAGE_SIZE) {
		case 4 * 1024:
			rp->rbr_sizes[2] = 4096;
			break;

		default:
			rp->rbr_sizes[2] = 8192;
			break;
		}
	} else {
		rp->rbr_sizes[2] = 2048;
	}
	rp->rbr_sizes[3] = rp->rbr_block_size;
}
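
/* bss is capped at 15, so an RBR block is at most 32KB; with the common
 * 4KB page size bss == PAGE_SHIFT and each page supplies exactly one
 * block (rbr_blocks_per_page == 1).
 */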
4483 static int niu_alloc_channels(struct niu
*np
)
4485 struct niu_parent
*parent
= np
->parent
;
4486 int first_rx_channel
, first_tx_channel
;
4490 first_rx_channel
= first_tx_channel
= 0;
4491 for (i
= 0; i
< port
; i
++) {
4492 first_rx_channel
+= parent
->rxchan_per_port
[i
];
4493 first_tx_channel
+= parent
->txchan_per_port
[i
];
4496 np
->num_rx_rings
= parent
->rxchan_per_port
[port
];
4497 np
->num_tx_rings
= parent
->txchan_per_port
[port
];
4499 np
->dev
->real_num_tx_queues
= np
->num_tx_rings
;
4501 np
->rx_rings
= kzalloc(np
->num_rx_rings
* sizeof(struct rx_ring_info
),
4507 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4508 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4511 rp
->rx_channel
= first_rx_channel
+ i
;
4513 err
= niu_alloc_rx_ring_info(np
, rp
);
4517 niu_size_rbr(np
, rp
);
4519 /* XXX better defaults, configurable, etc... XXX */
4520 rp
->nonsyn_window
= 64;
4521 rp
->nonsyn_threshold
= rp
->rcr_table_size
- 64;
4522 rp
->syn_window
= 64;
4523 rp
->syn_threshold
= rp
->rcr_table_size
- 64;
4524 rp
->rcr_pkt_threshold
= 16;
4525 rp
->rcr_timeout
= 8;
4526 rp
->rbr_kick_thresh
= RBR_REFILL_MIN
;
4527 if (rp
->rbr_kick_thresh
< rp
->rbr_blocks_per_page
)
4528 rp
->rbr_kick_thresh
= rp
->rbr_blocks_per_page
;
4530 err
= niu_rbr_fill(np
, rp
, GFP_KERNEL
);
4535 np
->tx_rings
= kzalloc(np
->num_tx_rings
* sizeof(struct tx_ring_info
),
4541 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4542 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4545 rp
->tx_channel
= first_tx_channel
+ i
;
4547 err
= niu_alloc_tx_ring_info(np
, rp
);
4555 niu_free_channels(np
);
4559 static int niu_tx_cs_sng_poll(struct niu
*np
, int channel
)
4563 while (--limit
> 0) {
4564 u64 val
= nr64(TX_CS(channel
));
4565 if (val
& TX_CS_SNG_STATE
)
4571 static int niu_tx_channel_stop(struct niu
*np
, int channel
)
4573 u64 val
= nr64(TX_CS(channel
));
4575 val
|= TX_CS_STOP_N_GO
;
4576 nw64(TX_CS(channel
), val
);
4578 return niu_tx_cs_sng_poll(np
, channel
);
4581 static int niu_tx_cs_reset_poll(struct niu
*np
, int channel
)
4585 while (--limit
> 0) {
4586 u64 val
= nr64(TX_CS(channel
));
4587 if (!(val
& TX_CS_RST
))
4593 static int niu_tx_channel_reset(struct niu
*np
, int channel
)
4595 u64 val
= nr64(TX_CS(channel
));
4599 nw64(TX_CS(channel
), val
);
4601 err
= niu_tx_cs_reset_poll(np
, channel
);
4603 nw64(TX_RING_KICK(channel
), 0);
4608 static int niu_tx_channel_lpage_init(struct niu
*np
, int channel
)
4612 nw64(TX_LOG_MASK1(channel
), 0);
4613 nw64(TX_LOG_VAL1(channel
), 0);
4614 nw64(TX_LOG_MASK2(channel
), 0);
4615 nw64(TX_LOG_VAL2(channel
), 0);
4616 nw64(TX_LOG_PAGE_RELO1(channel
), 0);
4617 nw64(TX_LOG_PAGE_RELO2(channel
), 0);
4618 nw64(TX_LOG_PAGE_HDL(channel
), 0);
4620 val
= (u64
)np
->port
<< TX_LOG_PAGE_VLD_FUNC_SHIFT
;
4621 val
|= (TX_LOG_PAGE_VLD_PAGE0
| TX_LOG_PAGE_VLD_PAGE1
);
4622 nw64(TX_LOG_PAGE_VLD(channel
), val
);
4624 /* XXX TXDMA 32bit mode? XXX */
4629 static void niu_txc_enable_port(struct niu
*np
, int on
)
4631 unsigned long flags
;
4634 niu_lock_parent(np
, flags
);
4635 val
= nr64(TXC_CONTROL
);
4636 mask
= (u64
)1 << np
->port
;
4638 val
|= TXC_CONTROL_ENABLE
| mask
;
4641 if ((val
& ~TXC_CONTROL_ENABLE
) == 0)
4642 val
&= ~TXC_CONTROL_ENABLE
;
4644 nw64(TXC_CONTROL
, val
);
4645 niu_unlock_parent(np
, flags
);
4648 static void niu_txc_set_imask(struct niu
*np
, u64 imask
)
4650 unsigned long flags
;
4653 niu_lock_parent(np
, flags
);
4654 val
= nr64(TXC_INT_MASK
);
4655 val
&= ~TXC_INT_MASK_VAL(np
->port
);
4656 val
|= (imask
<< TXC_INT_MASK_VAL_SHIFT(np
->port
));
4657 niu_unlock_parent(np
, flags
);
4660 static void niu_txc_port_dma_enable(struct niu
*np
, int on
)
4667 for (i
= 0; i
< np
->num_tx_rings
; i
++)
4668 val
|= (1 << np
->tx_rings
[i
].tx_channel
);
4670 nw64(TXC_PORT_DMA(np
->port
), val
);
4673 static int niu_init_one_tx_channel(struct niu
*np
, struct tx_ring_info
*rp
)
4675 int err
, channel
= rp
->tx_channel
;
4678 err
= niu_tx_channel_stop(np
, channel
);
4682 err
= niu_tx_channel_reset(np
, channel
);
4686 err
= niu_tx_channel_lpage_init(np
, channel
);
4690 nw64(TXC_DMA_MAX(channel
), rp
->max_burst
);
4691 nw64(TX_ENT_MSK(channel
), 0);
4693 if (rp
->descr_dma
& ~(TX_RNG_CFIG_STADDR_BASE
|
4694 TX_RNG_CFIG_STADDR
)) {
4695 netdev_err(np
->dev
, "TX ring channel %d DMA addr (%llx) is not aligned\n",
4696 channel
, (unsigned long long)rp
->descr_dma
);
	/* The length field in TX_RNG_CFIG is measured in 64-byte
	 * blocks. rp->pending is the number of TX descriptors in
	 * our ring, 8 bytes each, thus we divide by 8 bytes more
	 * to get the proper value the chip wants.
	 */
	ring_len = (rp->pending / 8);
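	/* Worked example (hypothetical ring size): a ring of 256 descriptors
	 * occupies 256 * 8 = 2048 bytes, i.e. 2048 / 64 = 32 64-byte blocks,
	 * which is exactly 256 / 8.
	 */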
4707 val
= ((ring_len
<< TX_RNG_CFIG_LEN_SHIFT
) |
4709 nw64(TX_RNG_CFIG(channel
), val
);
4711 if (((rp
->mbox_dma
>> 32) & ~TXDMA_MBH_MBADDR
) ||
4712 ((u32
)rp
->mbox_dma
& ~TXDMA_MBL_MBADDR
)) {
4713 netdev_err(np
->dev
, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
4714 channel
, (unsigned long long)rp
->mbox_dma
);
4717 nw64(TXDMA_MBH(channel
), rp
->mbox_dma
>> 32);
4718 nw64(TXDMA_MBL(channel
), rp
->mbox_dma
& TXDMA_MBL_MBADDR
);
4720 nw64(TX_CS(channel
), 0);
4722 rp
->last_pkt_cnt
= 0;
4727 static void niu_init_rdc_groups(struct niu
*np
)
4729 struct niu_rdc_tables
*tp
= &np
->parent
->rdc_group_cfg
[np
->port
];
4730 int i
, first_table_num
= tp
->first_table_num
;
4732 for (i
= 0; i
< tp
->num_tables
; i
++) {
4733 struct rdc_table
*tbl
= &tp
->tables
[i
];
4734 int this_table
= first_table_num
+ i
;
4737 for (slot
= 0; slot
< NIU_RDC_TABLE_SLOTS
; slot
++)
4738 nw64(RDC_TBL(this_table
, slot
),
4739 tbl
->rxdma_channel
[slot
]);
4742 nw64(DEF_RDC(np
->port
), np
->parent
->rdc_default
[np
->port
]);
4745 static void niu_init_drr_weight(struct niu
*np
)
4747 int type
= phy_decode(np
->parent
->port_phy
, np
->port
);
4752 val
= PT_DRR_WEIGHT_DEFAULT_10G
;
4757 val
= PT_DRR_WEIGHT_DEFAULT_1G
;
4760 nw64(PT_DRR_WT(np
->port
), val
);
4763 static int niu_init_hostinfo(struct niu
*np
)
4765 struct niu_parent
*parent
= np
->parent
;
4766 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[np
->port
];
4767 int i
, err
, num_alt
= niu_num_alt_addr(np
);
4768 int first_rdc_table
= tp
->first_table_num
;
4770 err
= niu_set_primary_mac_rdc_table(np
, first_rdc_table
, 1);
4774 err
= niu_set_multicast_mac_rdc_table(np
, first_rdc_table
, 1);
4778 for (i
= 0; i
< num_alt
; i
++) {
4779 err
= niu_set_alt_mac_rdc_table(np
, i
, first_rdc_table
, 1);
4787 static int niu_rx_channel_reset(struct niu
*np
, int channel
)
4789 return niu_set_and_wait_clear(np
, RXDMA_CFIG1(channel
),
4790 RXDMA_CFIG1_RST
, 1000, 10,
4794 static int niu_rx_channel_lpage_init(struct niu
*np
, int channel
)
4798 nw64(RX_LOG_MASK1(channel
), 0);
4799 nw64(RX_LOG_VAL1(channel
), 0);
4800 nw64(RX_LOG_MASK2(channel
), 0);
4801 nw64(RX_LOG_VAL2(channel
), 0);
4802 nw64(RX_LOG_PAGE_RELO1(channel
), 0);
4803 nw64(RX_LOG_PAGE_RELO2(channel
), 0);
4804 nw64(RX_LOG_PAGE_HDL(channel
), 0);
4806 val
= (u64
)np
->port
<< RX_LOG_PAGE_VLD_FUNC_SHIFT
;
4807 val
|= (RX_LOG_PAGE_VLD_PAGE0
| RX_LOG_PAGE_VLD_PAGE1
);
4808 nw64(RX_LOG_PAGE_VLD(channel
), val
);
4813 static void niu_rx_channel_wred_init(struct niu
*np
, struct rx_ring_info
*rp
)
4817 val
= (((u64
)rp
->nonsyn_window
<< RDC_RED_PARA_WIN_SHIFT
) |
4818 ((u64
)rp
->nonsyn_threshold
<< RDC_RED_PARA_THRE_SHIFT
) |
4819 ((u64
)rp
->syn_window
<< RDC_RED_PARA_WIN_SYN_SHIFT
) |
4820 ((u64
)rp
->syn_threshold
<< RDC_RED_PARA_THRE_SYN_SHIFT
));
4821 nw64(RDC_RED_PARA(rp
->rx_channel
), val
);
4824 static int niu_compute_rbr_cfig_b(struct rx_ring_info
*rp
, u64
*ret
)
4829 switch (rp
->rbr_block_size
) {
4831 val
|= (RBR_BLKSIZE_4K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4834 val
|= (RBR_BLKSIZE_8K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4837 val
|= (RBR_BLKSIZE_16K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4840 val
|= (RBR_BLKSIZE_32K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4845 val
|= RBR_CFIG_B_VLD2
;
4846 switch (rp
->rbr_sizes
[2]) {
4848 val
|= (RBR_BUFSZ2_2K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4851 val
|= (RBR_BUFSZ2_4K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4854 val
|= (RBR_BUFSZ2_8K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4857 val
|= (RBR_BUFSZ2_16K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4863 val
|= RBR_CFIG_B_VLD1
;
4864 switch (rp
->rbr_sizes
[1]) {
4866 val
|= (RBR_BUFSZ1_1K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4869 val
|= (RBR_BUFSZ1_2K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4872 val
|= (RBR_BUFSZ1_4K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4875 val
|= (RBR_BUFSZ1_8K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4881 val
|= RBR_CFIG_B_VLD0
;
4882 switch (rp
->rbr_sizes
[0]) {
4884 val
|= (RBR_BUFSZ0_256
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4887 val
|= (RBR_BUFSZ0_512
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4890 val
|= (RBR_BUFSZ0_1K
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4893 val
|= (RBR_BUFSZ0_2K
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4904 static int niu_enable_rx_channel(struct niu
*np
, int channel
, int on
)
4906 u64 val
= nr64(RXDMA_CFIG1(channel
));
4910 val
|= RXDMA_CFIG1_EN
;
4912 val
&= ~RXDMA_CFIG1_EN
;
4913 nw64(RXDMA_CFIG1(channel
), val
);
4916 while (--limit
> 0) {
4917 if (nr64(RXDMA_CFIG1(channel
)) & RXDMA_CFIG1_QST
)
4926 static int niu_init_one_rx_channel(struct niu
*np
, struct rx_ring_info
*rp
)
4928 int err
, channel
= rp
->rx_channel
;
4931 err
= niu_rx_channel_reset(np
, channel
);
4935 err
= niu_rx_channel_lpage_init(np
, channel
);
4939 niu_rx_channel_wred_init(np
, rp
);
4941 nw64(RX_DMA_ENT_MSK(channel
), RX_DMA_ENT_MSK_RBR_EMPTY
);
4942 nw64(RX_DMA_CTL_STAT(channel
),
4943 (RX_DMA_CTL_STAT_MEX
|
4944 RX_DMA_CTL_STAT_RCRTHRES
|
4945 RX_DMA_CTL_STAT_RCRTO
|
4946 RX_DMA_CTL_STAT_RBR_EMPTY
));
4947 nw64(RXDMA_CFIG1(channel
), rp
->mbox_dma
>> 32);
4948 nw64(RXDMA_CFIG2(channel
), (rp
->mbox_dma
& 0x00000000ffffffc0));
4949 nw64(RBR_CFIG_A(channel
),
4950 ((u64
)rp
->rbr_table_size
<< RBR_CFIG_A_LEN_SHIFT
) |
4951 (rp
->rbr_dma
& (RBR_CFIG_A_STADDR_BASE
| RBR_CFIG_A_STADDR
)));
4952 err
= niu_compute_rbr_cfig_b(rp
, &val
);
4955 nw64(RBR_CFIG_B(channel
), val
);
4956 nw64(RCRCFIG_A(channel
),
4957 ((u64
)rp
->rcr_table_size
<< RCRCFIG_A_LEN_SHIFT
) |
4958 (rp
->rcr_dma
& (RCRCFIG_A_STADDR_BASE
| RCRCFIG_A_STADDR
)));
4959 nw64(RCRCFIG_B(channel
),
4960 ((u64
)rp
->rcr_pkt_threshold
<< RCRCFIG_B_PTHRES_SHIFT
) |
4962 ((u64
)rp
->rcr_timeout
<< RCRCFIG_B_TIMEOUT_SHIFT
));
4964 err
= niu_enable_rx_channel(np
, channel
, 1);
4968 nw64(RBR_KICK(channel
), rp
->rbr_index
);
4970 val
= nr64(RX_DMA_CTL_STAT(channel
));
4971 val
|= RX_DMA_CTL_STAT_RBR_EMPTY
;
4972 nw64(RX_DMA_CTL_STAT(channel
), val
);
4977 static int niu_init_rx_channels(struct niu
*np
)
4979 unsigned long flags
;
4980 u64 seed
= jiffies_64
;
4983 niu_lock_parent(np
, flags
);
4984 nw64(RX_DMA_CK_DIV
, np
->parent
->rxdma_clock_divider
);
4985 nw64(RED_RAN_INIT
, RED_RAN_INIT_OPMODE
| (seed
& RED_RAN_INIT_VAL
));
4986 niu_unlock_parent(np
, flags
);
4988 /* XXX RXDMA 32bit mode? XXX */
4990 niu_init_rdc_groups(np
);
4991 niu_init_drr_weight(np
);
4993 err
= niu_init_hostinfo(np
);
4997 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4998 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
5000 err
= niu_init_one_rx_channel(np
, rp
);
5008 static int niu_set_ip_frag_rule(struct niu
*np
)
5010 struct niu_parent
*parent
= np
->parent
;
5011 struct niu_classifier
*cp
= &np
->clas
;
5012 struct niu_tcam_entry
*tp
;
5015 index
= cp
->tcam_top
;
5016 tp
= &parent
->tcam
[index
];
5018 /* Note that the noport bit is the same in both ipv4 and
5019 * ipv6 format TCAM entries.
5021 memset(tp
, 0, sizeof(*tp
));
5022 tp
->key
[1] = TCAM_V4KEY1_NOPORT
;
5023 tp
->key_mask
[1] = TCAM_V4KEY1_NOPORT
;
5024 tp
->assoc_data
= (TCAM_ASSOCDATA_TRES_USE_OFFSET
|
5025 ((u64
)0 << TCAM_ASSOCDATA_OFFSET_SHIFT
));
5026 err
= tcam_write(np
, index
, tp
->key
, tp
->key_mask
);
5029 err
= tcam_assoc_write(np
, index
, tp
->assoc_data
);
5033 cp
->tcam_valid_entries
++;
5038 static int niu_init_classifier_hw(struct niu
*np
)
5040 struct niu_parent
*parent
= np
->parent
;
5041 struct niu_classifier
*cp
= &np
->clas
;
5044 nw64(H1POLY
, cp
->h1_init
);
5045 nw64(H2POLY
, cp
->h2_init
);
5047 err
= niu_init_hostinfo(np
);
5051 for (i
= 0; i
< ENET_VLAN_TBL_NUM_ENTRIES
; i
++) {
5052 struct niu_vlan_rdc
*vp
= &cp
->vlan_mappings
[i
];
5054 vlan_tbl_write(np
, i
, np
->port
,
5055 vp
->vlan_pref
, vp
->rdc_num
);
5058 for (i
= 0; i
< cp
->num_alt_mac_mappings
; i
++) {
5059 struct niu_altmac_rdc
*ap
= &cp
->alt_mac_mappings
[i
];
5061 err
= niu_set_alt_mac_rdc_table(np
, ap
->alt_mac_num
,
5062 ap
->rdc_num
, ap
->mac_pref
);
5067 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_SCTP_IPV6
; i
++) {
5068 int index
= i
- CLASS_CODE_USER_PROG1
;
5070 err
= niu_set_tcam_key(np
, i
, parent
->tcam_key
[index
]);
5073 err
= niu_set_flow_key(np
, i
, parent
->flow_key
[index
]);
5078 err
= niu_set_ip_frag_rule(np
);
5087 static int niu_zcp_write(struct niu
*np
, int index
, u64
*data
)
5089 nw64(ZCP_RAM_DATA0
, data
[0]);
5090 nw64(ZCP_RAM_DATA1
, data
[1]);
5091 nw64(ZCP_RAM_DATA2
, data
[2]);
5092 nw64(ZCP_RAM_DATA3
, data
[3]);
5093 nw64(ZCP_RAM_DATA4
, data
[4]);
5094 nw64(ZCP_RAM_BE
, ZCP_RAM_BE_VAL
);
5096 (ZCP_RAM_ACC_WRITE
|
5097 (0 << ZCP_RAM_ACC_ZFCID_SHIFT
) |
5098 (ZCP_RAM_SEL_CFIFO(np
->port
) << ZCP_RAM_ACC_RAM_SEL_SHIFT
)));
5100 return niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
5104 static int niu_zcp_read(struct niu
*np
, int index
, u64
*data
)
5108 err
= niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
5111 netdev_err(np
->dev
, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
5112 (unsigned long long)nr64(ZCP_RAM_ACC
));
5118 (0 << ZCP_RAM_ACC_ZFCID_SHIFT
) |
5119 (ZCP_RAM_SEL_CFIFO(np
->port
) << ZCP_RAM_ACC_RAM_SEL_SHIFT
)));
5121 err
= niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
5124 netdev_err(np
->dev
, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
5125 (unsigned long long)nr64(ZCP_RAM_ACC
));
5129 data
[0] = nr64(ZCP_RAM_DATA0
);
5130 data
[1] = nr64(ZCP_RAM_DATA1
);
5131 data
[2] = nr64(ZCP_RAM_DATA2
);
5132 data
[3] = nr64(ZCP_RAM_DATA3
);
5133 data
[4] = nr64(ZCP_RAM_DATA4
);
5138 static void niu_zcp_cfifo_reset(struct niu
*np
)
5140 u64 val
= nr64(RESET_CFIFO
);
5142 val
|= RESET_CFIFO_RST(np
->port
);
5143 nw64(RESET_CFIFO
, val
);
5146 val
&= ~RESET_CFIFO_RST(np
->port
);
5147 nw64(RESET_CFIFO
, val
);
5150 static int niu_init_zcp(struct niu
*np
)
5152 u64 data
[5], rbuf
[5];
5155 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
5156 if (np
->port
== 0 || np
->port
== 1)
5157 max
= ATLAS_P0_P1_CFIFO_ENTRIES
;
5159 max
= ATLAS_P2_P3_CFIFO_ENTRIES
;
5161 max
= NIU_CFIFO_ENTRIES
;
5169 for (i
= 0; i
< max
; i
++) {
5170 err
= niu_zcp_write(np
, i
, data
);
5173 err
= niu_zcp_read(np
, i
, rbuf
);
5178 niu_zcp_cfifo_reset(np
);
5179 nw64(CFIFO_ECC(np
->port
), 0);
5180 nw64(ZCP_INT_STAT
, ZCP_INT_STAT_ALL
);
5181 (void) nr64(ZCP_INT_STAT
);
5182 nw64(ZCP_INT_MASK
, ZCP_INT_MASK_ALL
);
5187 static void niu_ipp_write(struct niu
*np
, int index
, u64
*data
)
5189 u64 val
= nr64_ipp(IPP_CFIG
);
5191 nw64_ipp(IPP_CFIG
, val
| IPP_CFIG_DFIFO_PIO_W
);
5192 nw64_ipp(IPP_DFIFO_WR_PTR
, index
);
5193 nw64_ipp(IPP_DFIFO_WR0
, data
[0]);
5194 nw64_ipp(IPP_DFIFO_WR1
, data
[1]);
5195 nw64_ipp(IPP_DFIFO_WR2
, data
[2]);
5196 nw64_ipp(IPP_DFIFO_WR3
, data
[3]);
5197 nw64_ipp(IPP_DFIFO_WR4
, data
[4]);
5198 nw64_ipp(IPP_CFIG
, val
& ~IPP_CFIG_DFIFO_PIO_W
);
5201 static void niu_ipp_read(struct niu
*np
, int index
, u64
*data
)
5203 nw64_ipp(IPP_DFIFO_RD_PTR
, index
);
5204 data
[0] = nr64_ipp(IPP_DFIFO_RD0
);
5205 data
[1] = nr64_ipp(IPP_DFIFO_RD1
);
5206 data
[2] = nr64_ipp(IPP_DFIFO_RD2
);
5207 data
[3] = nr64_ipp(IPP_DFIFO_RD3
);
5208 data
[4] = nr64_ipp(IPP_DFIFO_RD4
);
5211 static int niu_ipp_reset(struct niu
*np
)
5213 return niu_set_and_wait_clear_ipp(np
, IPP_CFIG
, IPP_CFIG_SOFT_RST
,
5214 1000, 100, "IPP_CFIG");
5217 static int niu_init_ipp(struct niu
*np
)
5219 u64 data
[5], rbuf
[5], val
;
5222 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
5223 if (np
->port
== 0 || np
->port
== 1)
5224 max
= ATLAS_P0_P1_DFIFO_ENTRIES
;
5226 max
= ATLAS_P2_P3_DFIFO_ENTRIES
;
5228 max
= NIU_DFIFO_ENTRIES
;
5236 for (i
= 0; i
< max
; i
++) {
5237 niu_ipp_write(np
, i
, data
);
5238 niu_ipp_read(np
, i
, rbuf
);
5241 (void) nr64_ipp(IPP_INT_STAT
);
5242 (void) nr64_ipp(IPP_INT_STAT
);
5244 err
= niu_ipp_reset(np
);
5248 (void) nr64_ipp(IPP_PKT_DIS
);
5249 (void) nr64_ipp(IPP_BAD_CS_CNT
);
5250 (void) nr64_ipp(IPP_ECC
);
5252 (void) nr64_ipp(IPP_INT_STAT
);
5254 nw64_ipp(IPP_MSK
, ~IPP_MSK_ALL
);
5256 val
= nr64_ipp(IPP_CFIG
);
5257 val
&= ~IPP_CFIG_IP_MAX_PKT
;
5258 val
|= (IPP_CFIG_IPP_ENABLE
|
5259 IPP_CFIG_DFIFO_ECC_EN
|
5260 IPP_CFIG_DROP_BAD_CRC
|
5262 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT
));
5263 nw64_ipp(IPP_CFIG
, val
);
5268 static void niu_handle_led(struct niu
*np
, int status
)
5271 val
= nr64_mac(XMAC_CONFIG
);
5273 if ((np
->flags
& NIU_FLAGS_10G
) != 0 &&
5274 (np
->flags
& NIU_FLAGS_FIBER
) != 0) {
5276 val
|= XMAC_CONFIG_LED_POLARITY
;
5277 val
&= ~XMAC_CONFIG_FORCE_LED_ON
;
5279 val
|= XMAC_CONFIG_FORCE_LED_ON
;
5280 val
&= ~XMAC_CONFIG_LED_POLARITY
;
5284 nw64_mac(XMAC_CONFIG
, val
);
5287 static void niu_init_xif_xmac(struct niu
*np
)
5289 struct niu_link_config
*lp
= &np
->link_config
;
5292 if (np
->flags
& NIU_FLAGS_XCVR_SERDES
) {
5293 val
= nr64(MIF_CONFIG
);
5294 val
|= MIF_CONFIG_ATCA_GE
;
5295 nw64(MIF_CONFIG
, val
);
5298 val
= nr64_mac(XMAC_CONFIG
);
5299 val
&= ~XMAC_CONFIG_SEL_POR_CLK_SRC
;
5301 val
|= XMAC_CONFIG_TX_OUTPUT_EN
;
5303 if (lp
->loopback_mode
== LOOPBACK_MAC
) {
5304 val
&= ~XMAC_CONFIG_SEL_POR_CLK_SRC
;
5305 val
|= XMAC_CONFIG_LOOPBACK
;
5307 val
&= ~XMAC_CONFIG_LOOPBACK
;
5310 if (np
->flags
& NIU_FLAGS_10G
) {
5311 val
&= ~XMAC_CONFIG_LFS_DISABLE
;
5313 val
|= XMAC_CONFIG_LFS_DISABLE
;
5314 if (!(np
->flags
& NIU_FLAGS_FIBER
) &&
5315 !(np
->flags
& NIU_FLAGS_XCVR_SERDES
))
5316 val
|= XMAC_CONFIG_1G_PCS_BYPASS
;
5318 val
&= ~XMAC_CONFIG_1G_PCS_BYPASS
;
5321 val
&= ~XMAC_CONFIG_10G_XPCS_BYPASS
;
5323 if (lp
->active_speed
== SPEED_100
)
5324 val
|= XMAC_CONFIG_SEL_CLK_25MHZ
;
5326 val
&= ~XMAC_CONFIG_SEL_CLK_25MHZ
;
5328 nw64_mac(XMAC_CONFIG
, val
);
5330 val
= nr64_mac(XMAC_CONFIG
);
5331 val
&= ~XMAC_CONFIG_MODE_MASK
;
5332 if (np
->flags
& NIU_FLAGS_10G
) {
5333 val
|= XMAC_CONFIG_MODE_XGMII
;
5335 if (lp
->active_speed
== SPEED_1000
)
5336 val
|= XMAC_CONFIG_MODE_GMII
;
5338 val
|= XMAC_CONFIG_MODE_MII
;
5341 nw64_mac(XMAC_CONFIG
, val
);
5344 static void niu_init_xif_bmac(struct niu
*np
)
5346 struct niu_link_config
*lp
= &np
->link_config
;
5349 val
= BMAC_XIF_CONFIG_TX_OUTPUT_EN
;
5351 if (lp
->loopback_mode
== LOOPBACK_MAC
)
5352 val
|= BMAC_XIF_CONFIG_MII_LOOPBACK
;
5354 val
&= ~BMAC_XIF_CONFIG_MII_LOOPBACK
;
5356 if (lp
->active_speed
== SPEED_1000
)
5357 val
|= BMAC_XIF_CONFIG_GMII_MODE
;
5359 val
&= ~BMAC_XIF_CONFIG_GMII_MODE
;
5361 val
&= ~(BMAC_XIF_CONFIG_LINK_LED
|
5362 BMAC_XIF_CONFIG_LED_POLARITY
);
5364 if (!(np
->flags
& NIU_FLAGS_10G
) &&
5365 !(np
->flags
& NIU_FLAGS_FIBER
) &&
5366 lp
->active_speed
== SPEED_100
)
5367 val
|= BMAC_XIF_CONFIG_25MHZ_CLOCK
;
5369 val
&= ~BMAC_XIF_CONFIG_25MHZ_CLOCK
;
5371 nw64_mac(BMAC_XIF_CONFIG
, val
);
5374 static void niu_init_xif(struct niu
*np
)
5376 if (np
->flags
& NIU_FLAGS_XMAC
)
5377 niu_init_xif_xmac(np
);
5379 niu_init_xif_bmac(np
);
5382 static void niu_pcs_mii_reset(struct niu
*np
)
5385 u64 val
= nr64_pcs(PCS_MII_CTL
);
5386 val
|= PCS_MII_CTL_RST
;
5387 nw64_pcs(PCS_MII_CTL
, val
);
5388 while ((--limit
>= 0) && (val
& PCS_MII_CTL_RST
)) {
5390 val
= nr64_pcs(PCS_MII_CTL
);
5394 static void niu_xpcs_reset(struct niu
*np
)
5397 u64 val
= nr64_xpcs(XPCS_CONTROL1
);
5398 val
|= XPCS_CONTROL1_RESET
;
5399 nw64_xpcs(XPCS_CONTROL1
, val
);
5400 while ((--limit
>= 0) && (val
& XPCS_CONTROL1_RESET
)) {
5402 val
= nr64_xpcs(XPCS_CONTROL1
);
5406 static int niu_init_pcs(struct niu
*np
)
5408 struct niu_link_config
*lp
= &np
->link_config
;
5411 switch (np
->flags
& (NIU_FLAGS_10G
|
5413 NIU_FLAGS_XCVR_SERDES
)) {
5414 case NIU_FLAGS_FIBER
:
5416 nw64_pcs(PCS_CONF
, PCS_CONF_MASK
| PCS_CONF_ENABLE
);
5417 nw64_pcs(PCS_DPATH_MODE
, 0);
5418 niu_pcs_mii_reset(np
);
5422 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
5423 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
5425 if (!(np
->flags
& NIU_FLAGS_XMAC
))
5428 /* 10G copper or fiber */
5429 val
= nr64_mac(XMAC_CONFIG
);
5430 val
&= ~XMAC_CONFIG_10G_XPCS_BYPASS
;
5431 nw64_mac(XMAC_CONFIG
, val
);
5435 val
= nr64_xpcs(XPCS_CONTROL1
);
5436 if (lp
->loopback_mode
== LOOPBACK_PHY
)
5437 val
|= XPCS_CONTROL1_LOOPBACK
;
5439 val
&= ~XPCS_CONTROL1_LOOPBACK
;
5440 nw64_xpcs(XPCS_CONTROL1
, val
);
5442 nw64_xpcs(XPCS_DESKEW_ERR_CNT
, 0);
5443 (void) nr64_xpcs(XPCS_SYMERR_CNT01
);
5444 (void) nr64_xpcs(XPCS_SYMERR_CNT23
);
5448 case NIU_FLAGS_XCVR_SERDES
:
5450 niu_pcs_mii_reset(np
);
5451 nw64_pcs(PCS_CONF
, PCS_CONF_MASK
| PCS_CONF_ENABLE
);
5452 nw64_pcs(PCS_DPATH_MODE
, 0);
5457 case NIU_FLAGS_XCVR_SERDES
| NIU_FLAGS_FIBER
:
5458 /* 1G RGMII FIBER */
5459 nw64_pcs(PCS_DPATH_MODE
, PCS_DPATH_MODE_MII
);
5460 niu_pcs_mii_reset(np
);
static int niu_reset_tx_xmac(struct niu *np)
{
	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
					  (XTXMAC_SW_RST_REG_RS |
					   XTXMAC_SW_RST_SOFT_RST),
					  1000, 100, "XTXMAC_SW_RST");
}
static int niu_reset_tx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}
static int niu_reset_tx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_tx_xmac(np);
	else
		return niu_reset_tx_bmac(np);
}
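/* Program the XMAC transmit side: minimum/maximum frame sizes, the
 * inter-packet gap for XGMII or MII/GMII mode, and clear the TX counters.
 * TX itself is left disabled here; niu_enable_tx_mac() turns it on later.
 */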
static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	val = nr64_mac(XMAC_MIN);
	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
		 XMAC_MIN_RX_MIN_PKT_SIZE);
	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
	nw64_mac(XMAC_MIN, val);

	nw64_mac(XMAC_MAX, max);

	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);

	val = nr64_mac(XMAC_IPG);
	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_IPG_IPG_XGMII;
		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
	} else {
		val &= ~XMAC_IPG_IPG_MII_GMII;
		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
	}
	nw64_mac(XMAC_IPG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
		 XMAC_CONFIG_STRETCH_MODE |
		 XMAC_CONFIG_VAR_MIN_IPG_EN |
		 XMAC_CONFIG_TX_ENABLE);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(TXMAC_FRM_CNT, 0);
	nw64_mac(TXMAC_BYTE_CNT, 0);
}
static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	nw64_mac(BMAC_MIN_FRAME, min);
	nw64_mac(BMAC_MAX_FRAME, max);

	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
	nw64_mac(BMAC_PREAMBLE_SIZE, 7);

	val = nr64_mac(BTXMAC_CONFIG);
	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
		 BTXMAC_CONFIG_ENABLE);
	nw64_mac(BTXMAC_CONFIG, val);
}
static void niu_init_tx_mac(struct niu *np)
{
	u64 min, max;

	min = 64;
	if (np->dev->mtu > ETH_DATA_LEN)
		max = 9216;
	else
		max = 1522;

	/* The XMAC_MIN register only accepts values for TX min which
	 * have the low 3 bits cleared.
	 */
	BUG_ON(min & 0x7);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_tx_xmac(np, min, max);
	else
		niu_init_tx_bmac(np, min, max);
}
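/* RX MAC resets mirror the TX ones above: write the software-reset bits
 * and poll until the hardware clears them, reporting a timeout otherwise.
 */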
static int niu_reset_rx_xmac(struct niu *np)
{
	int limit;

	nw64_mac(XRXMAC_SW_RST,
		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
						 XRXMAC_SW_RST_SOFT_RST)))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}
static int niu_reset_rx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}
static int niu_reset_rx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_rx_xmac(np);
	else
		return niu_reset_rx_bmac(np);
}
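/* RX MAC initialization clears the address filters and hash table, points
 * the primary and multicast MAC at this port's first RDC table, masks all
 * RX MAC interrupts, and zeroes the receive statistics counters.
 */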
static void niu_init_rx_xmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(XMAC_ADD_FILT0, 0);
	nw64_mac(XMAC_ADD_FILT1, 0);
	nw64_mac(XMAC_ADD_FILT2, 0);
	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(XMAC_HASH_TBL(i), 0);
	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
		 XMAC_CONFIG_PROMISCUOUS |
		 XMAC_CONFIG_PROMISC_GROUP |
		 XMAC_CONFIG_ERR_CHK_DIS |
		 XMAC_CONFIG_RX_CRC_CHK_DIS |
		 XMAC_CONFIG_RESERVED_MULTICAST |
		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
		 XMAC_CONFIG_ADDR_FILTER_EN |
		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
		 XMAC_CONFIG_STRIP_CRC |
		 XMAC_CONFIG_PASS_FLOW_CTRL |
		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
	val |= (XMAC_CONFIG_HASH_FILTER_EN);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(RXMAC_BT_CNT, 0);
	nw64_mac(RXMAC_BC_FRM_CNT, 0);
	nw64_mac(RXMAC_MC_FRM_CNT, 0);
	nw64_mac(RXMAC_FRAG_CNT, 0);
	nw64_mac(RXMAC_HIST_CNT1, 0);
	nw64_mac(RXMAC_HIST_CNT2, 0);
	nw64_mac(RXMAC_HIST_CNT3, 0);
	nw64_mac(RXMAC_HIST_CNT4, 0);
	nw64_mac(RXMAC_HIST_CNT5, 0);
	nw64_mac(RXMAC_HIST_CNT6, 0);
	nw64_mac(RXMAC_HIST_CNT7, 0);
	nw64_mac(RXMAC_MPSZER_CNT, 0);
	nw64_mac(RXMAC_CRC_ER_CNT, 0);
	nw64_mac(RXMAC_CD_VIO_CNT, 0);
	nw64_mac(LINK_FAULT_CNT, 0);
}
static void niu_init_rx_bmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(BMAC_ADD_FILT0, 0);
	nw64_mac(BMAC_ADD_FILT1, 0);
	nw64_mac(BMAC_ADD_FILT2, 0);
	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(BMAC_HASH_TBL(i), 0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);

	val = nr64_mac(BRXMAC_CONFIG);
	val &= ~(BRXMAC_CONFIG_ENABLE |
		 BRXMAC_CONFIG_STRIP_PAD |
		 BRXMAC_CONFIG_STRIP_FCS |
		 BRXMAC_CONFIG_PROMISC |
		 BRXMAC_CONFIG_PROMISC_GRP |
		 BRXMAC_CONFIG_ADDR_FILT_EN |
		 BRXMAC_CONFIG_DISCARD_DIS);
	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
	nw64_mac(BRXMAC_CONFIG, val);

	val = nr64_mac(BMAC_ADDR_CMPEN);
	val |= BMAC_ADDR_CMPEN_EN0;
	nw64_mac(BMAC_ADDR_CMPEN, val);
}
static void niu_init_rx_mac(struct niu *np)
{
	niu_set_primary_mac(np, np->dev->dev_addr);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_rx_xmac(np);
	else
		niu_init_rx_bmac(np);
}
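/* The enable/disable helpers below just toggle the relevant enable bit in
 * the MAC config register; the RX variants also refresh the multicast hash
 * and promiscuous bits from np->flags while they are at it.
 */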
static void niu_enable_tx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	if (on)
		val |= XMAC_CONFIG_TX_ENABLE;
	else
		val &= ~XMAC_CONFIG_TX_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_tx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BTXMAC_CONFIG);

	if (on)
		val |= BTXMAC_CONFIG_ENABLE;
	else
		val &= ~BTXMAC_CONFIG_ENABLE;
	nw64_mac(BTXMAC_CONFIG, val);
}

static void niu_enable_tx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_tx_xmac(np, on);
	else
		niu_enable_tx_bmac(np, on);
}
static void niu_enable_rx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
		 XMAC_CONFIG_PROMISCUOUS);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= XMAC_CONFIG_HASH_FILTER_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= XMAC_CONFIG_PROMISCUOUS;

	if (on)
		val |= XMAC_CONFIG_RX_MAC_ENABLE;
	else
		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_rx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BRXMAC_CONFIG);

	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
		 BRXMAC_CONFIG_PROMISC);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= BRXMAC_CONFIG_HASH_FILT_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= BRXMAC_CONFIG_PROMISC;

	if (on)
		val |= BRXMAC_CONFIG_ENABLE;
	else
		val &= ~BRXMAC_CONFIG_ENABLE;
	nw64_mac(BRXMAC_CONFIG, val);
}

static void niu_enable_rx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_rx_xmac(np, on);
	else
		niu_enable_rx_bmac(np, on);
}
static int niu_init_mac(struct niu *np)
{
	int err;

	niu_init_xif(np);
	err = niu_init_pcs(np);
	if (err)
		return err;

	err = niu_reset_tx_mac(np);
	if (err)
		return err;
	niu_init_tx_mac(np);
	err = niu_reset_rx_mac(np);
	if (err)
		return err;
	niu_init_rx_mac(np);

	/* This looks hookey but the RX MAC reset we just did will
	 * undo some of the state we setup in niu_init_tx_mac() so we
	 * have to call it again.  In particular, the RX MAC reset will
	 * set the XMAC_MAX register back to its default value.
	 */
	niu_init_tx_mac(np);
	niu_enable_tx_mac(np, 1);

	niu_enable_rx_mac(np, 1);

	return 0;
}
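/* Per-channel TX/RX stop and reset wrappers; the loops below simply apply
 * them across every ring the port owns.
 */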
static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_stop(np, rp->tx_channel);
}

static void niu_stop_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_stop_one_tx_channel(np, rp);
	}
}

static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_reset(np, rp->tx_channel);
}

static void niu_reset_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_reset_one_tx_channel(np, rp);
	}
}

static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
}

static void niu_stop_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_stop_one_rx_channel(np, rp);
	}
}

static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int channel = rp->rx_channel;

	(void) niu_rx_channel_reset(np, channel);
	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
	nw64(RX_DMA_CTL_STAT(channel), 0);
	(void) niu_enable_rx_channel(np, channel, 0);
}

static void niu_reset_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_reset_one_rx_channel(np, rp);
	}
}
static void niu_disable_ipp(struct niu *np)
{
	u64 rd, wr, val;
	int limit;

	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	limit = 100;
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &= ~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
		 IPP_CFIG_CKSUM_EN);
	nw64_ipp(IPP_CFIG, val);

	(void) niu_ipp_reset(np);
}
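/* Full hardware bring-up.  Order matters: TXC and the TX channels first,
 * then RX channels, classifier, ZCP, IPP, and finally the MAC.  Each later
 * stage unwinds the earlier ones via the goto labels on failure.
 */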
static int niu_init_hw(struct niu *np)
{
	int i, err;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
		if (err)
			return err;
	}

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

out_uninit_ipp:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
	niu_disable_ipp(np);

out_uninit_rx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);

	return err;
}
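/* Tear-down runs in roughly the reverse order of niu_init_hw(): quiesce
 * interrupts, the RX MAC and the IPP first so nothing new arrives, then
 * stop and reset the DMA channels.
 */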
static void niu_stop_hw(struct niu *np)
{
	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
	niu_enable_interrupts(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
	niu_enable_rx_mac(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
	niu_disable_ipp(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
	niu_stop_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
	niu_stop_rx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
	niu_reset_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
	niu_reset_rx_channels(np);
}
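/* Build human-readable IRQ names ("ethX:MAC", "ethX-rx-N", ...) so the
 * per-LDG vectors are identifiable in /proc/interrupts.
 */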
static void niu_set_irq_name(struct niu *np)
{
	int port = np->port;
	int i, j = 1;

	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);

	if (port == 0) {
		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
		j = 3;
	}

	for (i = 0; i < np->num_ldg - j; i++) {
		if (i < np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-rx-%d",
				np->dev->name, i);
		else if (i < np->num_tx_rings + np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
				i - np->num_rx_rings);
	}
}
static int niu_request_irq(struct niu *np)
{
	int i, j, err;

	niu_set_irq_name(np);

	err = 0;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = request_irq(lp->irq, niu_interrupt,
				  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
				  np->irq_name[i], lp);
		if (err)
			goto out_free_irqs;
	}

	return 0;

out_free_irqs:
	for (j = 0; j < i; j++) {
		struct niu_ldg *lp = &np->ldg[j];

		free_irq(lp->irq, lp);
	}
	return err;
}
static void niu_free_irq(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		free_irq(lp->irq, lp);
	}
}

static void niu_enable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_enable(&np->ldg[i].napi);
}

static void niu_disable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_disable(&np->ldg[i].napi);
}
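/* ndo_open: allocate rings, hook up interrupts and NAPI, then bring the
 * hardware up under np->lock and start the queues and the watchdog timer.
 */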
static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);
	if (err)
		goto out_err;

	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	niu_enable_napi(np);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (err) {
		niu_disable_napi(np);
		goto out_free_irq;
	}

	netif_tx_start_all_queues(dev);

	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

	return 0;

out_free_irq:
	niu_free_irq(np);

out_free_channels:
	niu_free_channels(np);

out_err:
	return err;
}
static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
}

static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);

	niu_free_irq(np);

	niu_free_channels(np);

	niu_handle_led(np, 0);

	return 0;
}
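/* Fold the hardware MAC counters into the software mac_stats structure,
 * XMAC or BMAC flavor depending on the port.
 */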
static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}
static void niu_sync_bmac_stats(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;

	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);

	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
}

static void niu_sync_mac_stats(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_sync_xmac_stats(np);
	else
		niu_sync_bmac_stats(np);
}
static void niu_get_rx_stats(struct niu *np)
{
	unsigned long pkts, dropped, errors, bytes;
	int i;

	pkts = dropped = errors = bytes = 0;
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_sync_rx_discard_stats(np, rp, 0);

		pkts += rp->rx_packets;
		bytes += rp->rx_bytes;
		dropped += rp->rx_dropped;
		errors += rp->rx_errors;
	}
	np->dev->stats.rx_packets = pkts;
	np->dev->stats.rx_bytes = bytes;
	np->dev->stats.rx_dropped = dropped;
	np->dev->stats.rx_errors = errors;
}

static void niu_get_tx_stats(struct niu *np)
{
	unsigned long pkts, errors, bytes;
	int i;

	pkts = errors = bytes = 0;
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		pkts += rp->tx_packets;
		bytes += rp->tx_bytes;
		errors += rp->tx_errors;
	}
	np->dev->stats.tx_packets = pkts;
	np->dev->stats.tx_bytes = bytes;
	np->dev->stats.tx_errors = errors;
}
static struct net_device_stats *niu_get_stats(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_get_rx_stats(np);
	niu_get_tx_stats(np);

	return &dev->stats;
}
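/* Multicast hash table loaders: 16 x 16-bit words written to the XMAC or
 * BMAC hash registers, selected by the NIU_FLAGS_XMAC flag as usual.
 */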
static void niu_load_hash_xmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash_bmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash(struct niu *np, u16 *hash)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_load_hash_xmac(np, hash);
	else
		niu_load_hash_bmac(np, hash);
}
static void niu_set_rx_mode(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int i, alt_cnt, err;
	struct dev_addr_list *addr;
	struct netdev_hw_addr *ha;
	unsigned long flags;
	u16 hash[16] = { 0, };

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);

	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
	if (dev->flags & IFF_PROMISC)
		np->flags |= NIU_FLAGS_PROMISC;
	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
		np->flags |= NIU_FLAGS_MCAST;

	alt_cnt = netdev_uc_count(dev);
	if (alt_cnt > niu_num_alt_addr(np)) {
		alt_cnt = 0;
		np->flags |= NIU_FLAGS_PROMISC;
	}

	if (alt_cnt) {
		int index = 0;

		netdev_for_each_uc_addr(ha, dev) {
			err = niu_set_alt_mac(np, index, ha->addr);
			if (err)
				netdev_warn(dev, "Error %d adding alt mac %d\n",
					    err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				netdev_warn(dev, "Error %d enabling alt mac %d\n",
					    err, index);

			index++;
		}
	} else {
		int alt_start;

		if (np->flags & NIU_FLAGS_XMAC)
			alt_start = 0;
		else
			alt_start = 1;
		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
			err = niu_enable_alt_mac(np, i, 0);
			if (err)
				netdev_warn(dev, "Error %d disabling alt mac %d\n",
					    err, i);
		}
	}
	if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
	} else if (!netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(addr, dev) {
			u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);

			crc >>= 24;
			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
		}
	}

	if (np->flags & NIU_FLAGS_MCAST)
		niu_load_hash(np, hash);

	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
}
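/* Changing the station address only needs the RX MAC briefly disabled
 * while the new primary MAC is programmed.
 */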
static int niu_set_mac_addr(struct net_device *dev, void *p)
{
	struct niu *np = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned long flags;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	if (!netif_running(dev))
		return 0;

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);
	niu_set_primary_mac(np, dev->dev_addr);
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}
static void niu_netif_stop(struct niu *np)
{
	np->dev->trans_start = jiffies;	/* prevent tx timeout */

	niu_disable_napi(np);

	netif_tx_disable(np->dev);
}

static void niu_netif_start(struct niu *np)
{
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after niu_init_hw).
	 */
	netif_tx_wake_all_queues(np->dev);

	niu_enable_napi(np);

	niu_enable_interrupts(np, 1);
}
6433 static void niu_reset_buffers(struct niu
*np
)
6438 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
6439 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
6441 for (j
= 0, k
= 0; j
< MAX_RBR_RING_SIZE
; j
++) {
6444 page
= rp
->rxhash
[j
];
6447 (struct page
*) page
->mapping
;
6448 u64 base
= page
->index
;
6449 base
= base
>> RBR_DESCR_ADDR_SHIFT
;
6450 rp
->rbr
[k
++] = cpu_to_le32(base
);
6454 for (; k
< MAX_RBR_RING_SIZE
; k
++) {
6455 err
= niu_rbr_add_page(np
, rp
, GFP_ATOMIC
, k
);
6460 rp
->rbr_index
= rp
->rbr_table_size
- 1;
6462 rp
->rbr_pending
= 0;
6463 rp
->rbr_refill_pending
= 0;
6467 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
6468 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
6470 for (j
= 0; j
< MAX_TX_RING_SIZE
; j
++) {
6471 if (rp
->tx_buffs
[j
].skb
)
6472 (void) release_tx_packet(np
, rp
, j
);
6475 rp
->pending
= MAX_TX_RING_SIZE
;
static void niu_reset_task(struct work_struct *work)
{
	struct niu *np = container_of(work, struct niu, reset_task);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);
	if (!netif_running(np->dev)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&np->lock, flags);

	del_timer_sync(&np->timer);

	niu_netif_stop(np);

	spin_lock_irqsave(&np->lock, flags);

	niu_stop_hw(np);

	spin_unlock_irqrestore(&np->lock, flags);

	niu_reset_buffers(np);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);
}
static void niu_tx_timeout(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	dev_err(np->device, "%s: Transmit timed out, resetting\n",
		dev->name);

	schedule_work(&np->reset_task);
}

static void niu_set_txd(struct tx_ring_info *rp, int index,
			u64 mapping, u64 len, u64 mark,
			u64 n_frags)
{
	__le64 *desc = &rp->descr[index];

	*desc = cpu_to_le64(mark |
			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
			    (len << TX_DESC_TR_LEN_SHIFT) |
			    (mapping & TX_DESC_SAD));
}
6543 static u64
niu_compute_tx_flags(struct sk_buff
*skb
, struct ethhdr
*ehdr
,
6544 u64 pad_bytes
, u64 len
)
6546 u16 eth_proto
, eth_proto_inner
;
6547 u64 csum_bits
, l3off
, ihl
, ret
;
6551 eth_proto
= be16_to_cpu(ehdr
->h_proto
);
6552 eth_proto_inner
= eth_proto
;
6553 if (eth_proto
== ETH_P_8021Q
) {
6554 struct vlan_ethhdr
*vp
= (struct vlan_ethhdr
*) ehdr
;
6555 __be16 val
= vp
->h_vlan_encapsulated_proto
;
6557 eth_proto_inner
= be16_to_cpu(val
);
6561 switch (skb
->protocol
) {
6562 case cpu_to_be16(ETH_P_IP
):
6563 ip_proto
= ip_hdr(skb
)->protocol
;
6564 ihl
= ip_hdr(skb
)->ihl
;
6566 case cpu_to_be16(ETH_P_IPV6
):
6567 ip_proto
= ipv6_hdr(skb
)->nexthdr
;
6576 csum_bits
= TXHDR_CSUM_NONE
;
6577 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
6580 csum_bits
= (ip_proto
== IPPROTO_TCP
?
6582 (ip_proto
== IPPROTO_UDP
?
6583 TXHDR_CSUM_UDP
: TXHDR_CSUM_SCTP
));
6585 start
= skb_transport_offset(skb
) -
6586 (pad_bytes
+ sizeof(struct tx_pkt_hdr
));
6587 stuff
= start
+ skb
->csum_offset
;
6589 csum_bits
|= (start
/ 2) << TXHDR_L4START_SHIFT
;
6590 csum_bits
|= (stuff
/ 2) << TXHDR_L4STUFF_SHIFT
;
6593 l3off
= skb_network_offset(skb
) -
6594 (pad_bytes
+ sizeof(struct tx_pkt_hdr
));
6596 ret
= (((pad_bytes
/ 2) << TXHDR_PAD_SHIFT
) |
6597 (len
<< TXHDR_LEN_SHIFT
) |
6598 ((l3off
/ 2) << TXHDR_L3START_SHIFT
) |
6599 (ihl
<< TXHDR_IHL_SHIFT
) |
6600 ((eth_proto_inner
< 1536) ? TXHDR_LLC
: 0) |
6601 ((eth_proto
== ETH_P_8021Q
) ? TXHDR_VLAN
: 0) |
6602 (ipv6
? TXHDR_IP_VER
: 0) |
6608 static netdev_tx_t
niu_start_xmit(struct sk_buff
*skb
,
6609 struct net_device
*dev
)
6611 struct niu
*np
= netdev_priv(dev
);
6612 unsigned long align
, headroom
;
6613 struct netdev_queue
*txq
;
6614 struct tx_ring_info
*rp
;
6615 struct tx_pkt_hdr
*tp
;
6616 unsigned int len
, nfg
;
6617 struct ethhdr
*ehdr
;
6621 i
= skb_get_queue_mapping(skb
);
6622 rp
= &np
->tx_rings
[i
];
6623 txq
= netdev_get_tx_queue(dev
, i
);
6625 if (niu_tx_avail(rp
) <= (skb_shinfo(skb
)->nr_frags
+ 1)) {
6626 netif_tx_stop_queue(txq
);
6627 dev_err(np
->device
, "%s: BUG! Tx ring full when queue awake!\n", dev
->name
);
6629 return NETDEV_TX_BUSY
;
6632 if (skb
->len
< ETH_ZLEN
) {
6633 unsigned int pad_bytes
= ETH_ZLEN
- skb
->len
;
6635 if (skb_pad(skb
, pad_bytes
))
6637 skb_put(skb
, pad_bytes
);
6640 len
= sizeof(struct tx_pkt_hdr
) + 15;
6641 if (skb_headroom(skb
) < len
) {
6642 struct sk_buff
*skb_new
;
6644 skb_new
= skb_realloc_headroom(skb
, len
);
6654 align
= ((unsigned long) skb
->data
& (16 - 1));
6655 headroom
= align
+ sizeof(struct tx_pkt_hdr
);
6657 ehdr
= (struct ethhdr
*) skb
->data
;
6658 tp
= (struct tx_pkt_hdr
*) skb_push(skb
, headroom
);
6660 len
= skb
->len
- sizeof(struct tx_pkt_hdr
);
6661 tp
->flags
= cpu_to_le64(niu_compute_tx_flags(skb
, ehdr
, align
, len
));
6664 len
= skb_headlen(skb
);
6665 mapping
= np
->ops
->map_single(np
->device
, skb
->data
,
6666 len
, DMA_TO_DEVICE
);
6670 rp
->tx_buffs
[prod
].skb
= skb
;
6671 rp
->tx_buffs
[prod
].mapping
= mapping
;
6674 if (++rp
->mark_counter
== rp
->mark_freq
) {
6675 rp
->mark_counter
= 0;
6676 mrk
|= TX_DESC_MARK
;
6681 nfg
= skb_shinfo(skb
)->nr_frags
;
6683 tlen
-= MAX_TX_DESC_LEN
;
6688 unsigned int this_len
= len
;
6690 if (this_len
> MAX_TX_DESC_LEN
)
6691 this_len
= MAX_TX_DESC_LEN
;
6693 niu_set_txd(rp
, prod
, mapping
, this_len
, mrk
, nfg
);
6696 prod
= NEXT_TX(rp
, prod
);
6697 mapping
+= this_len
;
6701 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
6702 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
6705 mapping
= np
->ops
->map_page(np
->device
, frag
->page
,
6706 frag
->page_offset
, len
,
6709 rp
->tx_buffs
[prod
].skb
= NULL
;
6710 rp
->tx_buffs
[prod
].mapping
= mapping
;
6712 niu_set_txd(rp
, prod
, mapping
, len
, 0, 0);
6714 prod
= NEXT_TX(rp
, prod
);
6717 if (prod
< rp
->prod
)
6718 rp
->wrap_bit
^= TX_RING_KICK_WRAP
;
6721 nw64(TX_RING_KICK(rp
->tx_channel
), rp
->wrap_bit
| (prod
<< 3));
6723 if (unlikely(niu_tx_avail(rp
) <= (MAX_SKB_FRAGS
+ 1))) {
6724 netif_tx_stop_queue(txq
);
6725 if (niu_tx_avail(rp
) > NIU_TX_WAKEUP_THRESH(rp
))
6726 netif_tx_wake_queue(txq
);
6730 return NETDEV_TX_OK
;
6738 static int niu_change_mtu(struct net_device
*dev
, int new_mtu
)
6740 struct niu
*np
= netdev_priv(dev
);
6741 int err
, orig_jumbo
, new_jumbo
;
6743 if (new_mtu
< 68 || new_mtu
> NIU_MAX_MTU
)
6746 orig_jumbo
= (dev
->mtu
> ETH_DATA_LEN
);
6747 new_jumbo
= (new_mtu
> ETH_DATA_LEN
);
6751 if (!netif_running(dev
) ||
6752 (orig_jumbo
== new_jumbo
))
6755 niu_full_shutdown(np
, dev
);
6757 niu_free_channels(np
);
6759 niu_enable_napi(np
);
6761 err
= niu_alloc_channels(np
);
6765 spin_lock_irq(&np
->lock
);
6767 err
= niu_init_hw(np
);
6769 init_timer(&np
->timer
);
6770 np
->timer
.expires
= jiffies
+ HZ
;
6771 np
->timer
.data
= (unsigned long) np
;
6772 np
->timer
.function
= niu_timer
;
6774 err
= niu_enable_interrupts(np
, 1);
6779 spin_unlock_irq(&np
->lock
);
6782 netif_tx_start_all_queues(dev
);
6783 if (np
->link_config
.loopback_mode
!= LOOPBACK_DISABLED
)
6784 netif_carrier_on(dev
);
6786 add_timer(&np
->timer
);
static void niu_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct niu *np = netdev_priv(dev);
	struct niu_vpd *vpd = &np->vpd;

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	sprintf(info->fw_version, "%d.%d",
		vpd->fcode_major, vpd->fcode_minor);
	if (np->parent->plat_type != PLAT_TYPE_NIU)
		strcpy(info->bus_info, pci_name(np->pdev));
}

static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp;

	lp = &np->link_config;

	memset(cmd, 0, sizeof(*cmd));
	cmd->phy_address = np->phy_addr;
	cmd->supported = lp->supported;
	cmd->advertising = lp->active_advertising;
	cmd->autoneg = lp->active_autoneg;
	cmd->speed = lp->active_speed;
	cmd->duplex = lp->active_duplex;
	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
		XCVR_EXTERNAL : XCVR_INTERNAL;

	return 0;
}
static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp = &np->link_config;

	lp->advertising = cmd->advertising;
	lp->speed = cmd->speed;
	lp->duplex = cmd->duplex;
	lp->autoneg = cmd->autoneg;
	return niu_init_link(np);
}

static u32 niu_get_msglevel(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	return np->msg_enable;
}

static void niu_set_msglevel(struct net_device *dev, u32 value)
{
	struct niu *np = netdev_priv(dev);
	np->msg_enable = value;
}

static int niu_nway_reset(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	if (np->link_config.autoneg)
		return niu_init_link(np);

	return 0;
}

static int niu_get_eeprom_len(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	return np->eeprom_len;
}
6868 static int niu_get_eeprom(struct net_device
*dev
,
6869 struct ethtool_eeprom
*eeprom
, u8
*data
)
6871 struct niu
*np
= netdev_priv(dev
);
6872 u32 offset
, len
, val
;
6874 offset
= eeprom
->offset
;
6877 if (offset
+ len
< offset
)
6879 if (offset
>= np
->eeprom_len
)
6881 if (offset
+ len
> np
->eeprom_len
)
6882 len
= eeprom
->len
= np
->eeprom_len
- offset
;
6885 u32 b_offset
, b_count
;
6887 b_offset
= offset
& 3;
6888 b_count
= 4 - b_offset
;
6892 val
= nr64(ESPC_NCR((offset
- b_offset
) / 4));
6893 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
6899 val
= nr64(ESPC_NCR(offset
/ 4));
6900 memcpy(data
, &val
, 4);
6906 val
= nr64(ESPC_NCR(offset
/ 4));
6907 memcpy(data
, &val
, len
);
6912 static void niu_ethflow_to_l3proto(int flow_type
, u8
*pid
)
6914 switch (flow_type
) {
6925 *pid
= IPPROTO_SCTP
;
6941 static int niu_class_to_ethflow(u64
class, int *flow_type
)
6944 case CLASS_CODE_TCP_IPV4
:
6945 *flow_type
= TCP_V4_FLOW
;
6947 case CLASS_CODE_UDP_IPV4
:
6948 *flow_type
= UDP_V4_FLOW
;
6950 case CLASS_CODE_AH_ESP_IPV4
:
6951 *flow_type
= AH_V4_FLOW
;
6953 case CLASS_CODE_SCTP_IPV4
:
6954 *flow_type
= SCTP_V4_FLOW
;
6956 case CLASS_CODE_TCP_IPV6
:
6957 *flow_type
= TCP_V6_FLOW
;
6959 case CLASS_CODE_UDP_IPV6
:
6960 *flow_type
= UDP_V6_FLOW
;
6962 case CLASS_CODE_AH_ESP_IPV6
:
6963 *flow_type
= AH_V6_FLOW
;
6965 case CLASS_CODE_SCTP_IPV6
:
6966 *flow_type
= SCTP_V6_FLOW
;
6968 case CLASS_CODE_USER_PROG1
:
6969 case CLASS_CODE_USER_PROG2
:
6970 case CLASS_CODE_USER_PROG3
:
6971 case CLASS_CODE_USER_PROG4
:
6972 *flow_type
= IP_USER_FLOW
;
6981 static int niu_ethflow_to_class(int flow_type
, u64
*class)
6983 switch (flow_type
) {
6985 *class = CLASS_CODE_TCP_IPV4
;
6988 *class = CLASS_CODE_UDP_IPV4
;
6992 *class = CLASS_CODE_AH_ESP_IPV4
;
6995 *class = CLASS_CODE_SCTP_IPV4
;
6998 *class = CLASS_CODE_TCP_IPV6
;
7001 *class = CLASS_CODE_UDP_IPV6
;
7005 *class = CLASS_CODE_AH_ESP_IPV6
;
7008 *class = CLASS_CODE_SCTP_IPV6
;
7017 static u64
niu_flowkey_to_ethflow(u64 flow_key
)
7021 if (flow_key
& FLOW_KEY_L2DA
)
7022 ethflow
|= RXH_L2DA
;
7023 if (flow_key
& FLOW_KEY_VLAN
)
7024 ethflow
|= RXH_VLAN
;
7025 if (flow_key
& FLOW_KEY_IPSA
)
7026 ethflow
|= RXH_IP_SRC
;
7027 if (flow_key
& FLOW_KEY_IPDA
)
7028 ethflow
|= RXH_IP_DST
;
7029 if (flow_key
& FLOW_KEY_PROTO
)
7030 ethflow
|= RXH_L3_PROTO
;
7031 if (flow_key
& (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_0_SHIFT
))
7032 ethflow
|= RXH_L4_B_0_1
;
7033 if (flow_key
& (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_1_SHIFT
))
7034 ethflow
|= RXH_L4_B_2_3
;
7040 static int niu_ethflow_to_flowkey(u64 ethflow
, u64
*flow_key
)
7044 if (ethflow
& RXH_L2DA
)
7045 key
|= FLOW_KEY_L2DA
;
7046 if (ethflow
& RXH_VLAN
)
7047 key
|= FLOW_KEY_VLAN
;
7048 if (ethflow
& RXH_IP_SRC
)
7049 key
|= FLOW_KEY_IPSA
;
7050 if (ethflow
& RXH_IP_DST
)
7051 key
|= FLOW_KEY_IPDA
;
7052 if (ethflow
& RXH_L3_PROTO
)
7053 key
|= FLOW_KEY_PROTO
;
7054 if (ethflow
& RXH_L4_B_0_1
)
7055 key
|= (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_0_SHIFT
);
7056 if (ethflow
& RXH_L4_B_2_3
)
7057 key
|= (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_1_SHIFT
);
7065 static int niu_get_hash_opts(struct niu
*np
, struct ethtool_rxnfc
*nfc
)
7071 if (!niu_ethflow_to_class(nfc
->flow_type
, &class))
7074 if (np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] &
7076 nfc
->data
= RXH_DISCARD
;
7078 nfc
->data
= niu_flowkey_to_ethflow(np
->parent
->flow_key
[class -
7079 CLASS_CODE_USER_PROG1
]);
7083 static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry
*tp
,
7084 struct ethtool_rx_flow_spec
*fsp
)
7087 fsp
->h_u
.tcp_ip4_spec
.ip4src
= (tp
->key
[3] & TCAM_V4KEY3_SADDR
) >>
7088 TCAM_V4KEY3_SADDR_SHIFT
;
7089 fsp
->h_u
.tcp_ip4_spec
.ip4dst
= (tp
->key
[3] & TCAM_V4KEY3_DADDR
) >>
7090 TCAM_V4KEY3_DADDR_SHIFT
;
7091 fsp
->m_u
.tcp_ip4_spec
.ip4src
= (tp
->key_mask
[3] & TCAM_V4KEY3_SADDR
) >>
7092 TCAM_V4KEY3_SADDR_SHIFT
;
7093 fsp
->m_u
.tcp_ip4_spec
.ip4dst
= (tp
->key_mask
[3] & TCAM_V4KEY3_DADDR
) >>
7094 TCAM_V4KEY3_DADDR_SHIFT
;
7096 fsp
->h_u
.tcp_ip4_spec
.ip4src
=
7097 cpu_to_be32(fsp
->h_u
.tcp_ip4_spec
.ip4src
);
7098 fsp
->m_u
.tcp_ip4_spec
.ip4src
=
7099 cpu_to_be32(fsp
->m_u
.tcp_ip4_spec
.ip4src
);
7100 fsp
->h_u
.tcp_ip4_spec
.ip4dst
=
7101 cpu_to_be32(fsp
->h_u
.tcp_ip4_spec
.ip4dst
);
7102 fsp
->m_u
.tcp_ip4_spec
.ip4dst
=
7103 cpu_to_be32(fsp
->m_u
.tcp_ip4_spec
.ip4dst
);
7105 fsp
->h_u
.tcp_ip4_spec
.tos
= (tp
->key
[2] & TCAM_V4KEY2_TOS
) >>
7106 TCAM_V4KEY2_TOS_SHIFT
;
7107 fsp
->m_u
.tcp_ip4_spec
.tos
= (tp
->key_mask
[2] & TCAM_V4KEY2_TOS
) >>
7108 TCAM_V4KEY2_TOS_SHIFT
;
7110 switch (fsp
->flow_type
) {
7114 fsp
->h_u
.tcp_ip4_spec
.psrc
=
7115 ((tp
->key
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7116 TCAM_V4KEY2_PORT_SPI_SHIFT
) >> 16;
7117 fsp
->h_u
.tcp_ip4_spec
.pdst
=
7118 ((tp
->key
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7119 TCAM_V4KEY2_PORT_SPI_SHIFT
) & 0xffff;
7120 fsp
->m_u
.tcp_ip4_spec
.psrc
=
7121 ((tp
->key_mask
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7122 TCAM_V4KEY2_PORT_SPI_SHIFT
) >> 16;
7123 fsp
->m_u
.tcp_ip4_spec
.pdst
=
7124 ((tp
->key_mask
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7125 TCAM_V4KEY2_PORT_SPI_SHIFT
) & 0xffff;
7127 fsp
->h_u
.tcp_ip4_spec
.psrc
=
7128 cpu_to_be16(fsp
->h_u
.tcp_ip4_spec
.psrc
);
7129 fsp
->h_u
.tcp_ip4_spec
.pdst
=
7130 cpu_to_be16(fsp
->h_u
.tcp_ip4_spec
.pdst
);
7131 fsp
->m_u
.tcp_ip4_spec
.psrc
=
7132 cpu_to_be16(fsp
->m_u
.tcp_ip4_spec
.psrc
);
7133 fsp
->m_u
.tcp_ip4_spec
.pdst
=
7134 cpu_to_be16(fsp
->m_u
.tcp_ip4_spec
.pdst
);
7138 fsp
->h_u
.ah_ip4_spec
.spi
=
7139 (tp
->key
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7140 TCAM_V4KEY2_PORT_SPI_SHIFT
;
7141 fsp
->m_u
.ah_ip4_spec
.spi
=
7142 (tp
->key_mask
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7143 TCAM_V4KEY2_PORT_SPI_SHIFT
;
7145 fsp
->h_u
.ah_ip4_spec
.spi
=
7146 cpu_to_be32(fsp
->h_u
.ah_ip4_spec
.spi
);
7147 fsp
->m_u
.ah_ip4_spec
.spi
=
7148 cpu_to_be32(fsp
->m_u
.ah_ip4_spec
.spi
);
7151 fsp
->h_u
.usr_ip4_spec
.l4_4_bytes
=
7152 (tp
->key
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7153 TCAM_V4KEY2_PORT_SPI_SHIFT
;
7154 fsp
->m_u
.usr_ip4_spec
.l4_4_bytes
=
7155 (tp
->key_mask
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7156 TCAM_V4KEY2_PORT_SPI_SHIFT
;
7158 fsp
->h_u
.usr_ip4_spec
.l4_4_bytes
=
7159 cpu_to_be32(fsp
->h_u
.usr_ip4_spec
.l4_4_bytes
);
7160 fsp
->m_u
.usr_ip4_spec
.l4_4_bytes
=
7161 cpu_to_be32(fsp
->m_u
.usr_ip4_spec
.l4_4_bytes
);
7163 fsp
->h_u
.usr_ip4_spec
.proto
=
7164 (tp
->key
[2] & TCAM_V4KEY2_PROTO
) >>
7165 TCAM_V4KEY2_PROTO_SHIFT
;
7166 fsp
->m_u
.usr_ip4_spec
.proto
=
7167 (tp
->key_mask
[2] & TCAM_V4KEY2_PROTO
) >>
7168 TCAM_V4KEY2_PROTO_SHIFT
;
7170 fsp
->h_u
.usr_ip4_spec
.ip_ver
= ETH_RX_NFC_IP4
;
7177 static int niu_get_ethtool_tcam_entry(struct niu
*np
,
7178 struct ethtool_rxnfc
*nfc
)
7180 struct niu_parent
*parent
= np
->parent
;
7181 struct niu_tcam_entry
*tp
;
7182 struct ethtool_rx_flow_spec
*fsp
= &nfc
->fs
;
7187 idx
= tcam_get_index(np
, (u16
)nfc
->fs
.location
);
7189 tp
= &parent
->tcam
[idx
];
7191 netdev_info(np
->dev
, "niu%d: entry [%d] invalid for idx[%d]\n",
7192 parent
->index
, (u16
)nfc
->fs
.location
, idx
);
7196 /* fill the flow spec entry */
7197 class = (tp
->key
[0] & TCAM_V4KEY0_CLASS_CODE
) >>
7198 TCAM_V4KEY0_CLASS_CODE_SHIFT
;
7199 ret
= niu_class_to_ethflow(class, &fsp
->flow_type
);
7202 netdev_info(np
->dev
, "niu%d: niu_class_to_ethflow failed\n",
7208 if (fsp
->flow_type
== AH_V4_FLOW
|| fsp
->flow_type
== AH_V6_FLOW
) {
7209 u32 proto
= (tp
->key
[2] & TCAM_V4KEY2_PROTO
) >>
7210 TCAM_V4KEY2_PROTO_SHIFT
;
7211 if (proto
== IPPROTO_ESP
) {
7212 if (fsp
->flow_type
== AH_V4_FLOW
)
7213 fsp
->flow_type
= ESP_V4_FLOW
;
7215 fsp
->flow_type
= ESP_V6_FLOW
;
7219 switch (fsp
->flow_type
) {
7225 niu_get_ip4fs_from_tcam_key(tp
, fsp
);
7232 /* Not yet implemented */
7236 niu_get_ip4fs_from_tcam_key(tp
, fsp
);
7246 if (tp
->assoc_data
& TCAM_ASSOCDATA_DISC
)
7247 fsp
->ring_cookie
= RX_CLS_FLOW_DISC
;
7249 fsp
->ring_cookie
= (tp
->assoc_data
& TCAM_ASSOCDATA_OFFSET
) >>
7250 TCAM_ASSOCDATA_OFFSET_SHIFT
;
7252 /* put the tcam size here */
7253 nfc
->data
= tcam_get_size(np
);
7258 static int niu_get_ethtool_tcam_all(struct niu
*np
,
7259 struct ethtool_rxnfc
*nfc
,
7262 struct niu_parent
*parent
= np
->parent
;
7263 struct niu_tcam_entry
*tp
;
7266 unsigned long flags
;
7269 /* put the tcam size here */
7270 nfc
->data
= tcam_get_size(np
);
7272 niu_lock_parent(np
, flags
);
7273 n_entries
= nfc
->rule_cnt
;
7274 for (cnt
= 0, i
= 0; i
< nfc
->data
; i
++) {
7275 idx
= tcam_get_index(np
, i
);
7276 tp
= &parent
->tcam
[idx
];
7282 niu_unlock_parent(np
, flags
);
7284 if (n_entries
!= cnt
) {
7285 /* print warning, this should not happen */
7286 netdev_info(np
->dev
, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
7287 np
->parent
->index
, __func__
, n_entries
, cnt
);
7293 static int niu_get_nfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
,
7296 struct niu
*np
= netdev_priv(dev
);
7301 ret
= niu_get_hash_opts(np
, cmd
);
7303 case ETHTOOL_GRXRINGS
:
7304 cmd
->data
= np
->num_rx_rings
;
7306 case ETHTOOL_GRXCLSRLCNT
:
7307 cmd
->rule_cnt
= tcam_get_valid_entry_cnt(np
);
7309 case ETHTOOL_GRXCLSRULE
:
7310 ret
= niu_get_ethtool_tcam_entry(np
, cmd
);
7312 case ETHTOOL_GRXCLSRLALL
:
7313 ret
= niu_get_ethtool_tcam_all(np
, cmd
, (u32
*)rule_locs
);
7323 static int niu_set_hash_opts(struct niu
*np
, struct ethtool_rxnfc
*nfc
)
7327 unsigned long flags
;
7329 if (!niu_ethflow_to_class(nfc
->flow_type
, &class))
7332 if (class < CLASS_CODE_USER_PROG1
||
7333 class > CLASS_CODE_SCTP_IPV6
)
7336 if (nfc
->data
& RXH_DISCARD
) {
7337 niu_lock_parent(np
, flags
);
7338 flow_key
= np
->parent
->tcam_key
[class -
7339 CLASS_CODE_USER_PROG1
];
7340 flow_key
|= TCAM_KEY_DISC
;
7341 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1
), flow_key
);
7342 np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] = flow_key
;
7343 niu_unlock_parent(np
, flags
);
7346 /* Discard was set before, but is not set now */
7347 if (np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] &
7349 niu_lock_parent(np
, flags
);
7350 flow_key
= np
->parent
->tcam_key
[class -
7351 CLASS_CODE_USER_PROG1
];
7352 flow_key
&= ~TCAM_KEY_DISC
;
7353 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1
),
7355 np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] =
7357 niu_unlock_parent(np
, flags
);
7361 if (!niu_ethflow_to_flowkey(nfc
->data
, &flow_key
))
7364 niu_lock_parent(np
, flags
);
7365 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1
), flow_key
);
7366 np
->parent
->flow_key
[class - CLASS_CODE_USER_PROG1
] = flow_key
;
7367 niu_unlock_parent(np
, flags
);
7372 static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec
*fsp
,
7373 struct niu_tcam_entry
*tp
,
7374 int l2_rdc_tab
, u64
class)
7377 u32 sip
, dip
, sipm
, dipm
, spi
, spim
;
7378 u16 sport
, dport
, spm
, dpm
;
7380 sip
= be32_to_cpu(fsp
->h_u
.tcp_ip4_spec
.ip4src
);
7381 sipm
= be32_to_cpu(fsp
->m_u
.tcp_ip4_spec
.ip4src
);
7382 dip
= be32_to_cpu(fsp
->h_u
.tcp_ip4_spec
.ip4dst
);
7383 dipm
= be32_to_cpu(fsp
->m_u
.tcp_ip4_spec
.ip4dst
);
7385 tp
->key
[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT
;
7386 tp
->key_mask
[0] = TCAM_V4KEY0_CLASS_CODE
;
7387 tp
->key
[1] = (u64
)l2_rdc_tab
<< TCAM_V4KEY1_L2RDCNUM_SHIFT
;
7388 tp
->key_mask
[1] = TCAM_V4KEY1_L2RDCNUM
;
7390 tp
->key
[3] = (u64
)sip
<< TCAM_V4KEY3_SADDR_SHIFT
;
7393 tp
->key_mask
[3] = (u64
)sipm
<< TCAM_V4KEY3_SADDR_SHIFT
;
7394 tp
->key_mask
[3] |= dipm
;
7396 tp
->key
[2] |= ((u64
)fsp
->h_u
.tcp_ip4_spec
.tos
<<
7397 TCAM_V4KEY2_TOS_SHIFT
);
7398 tp
->key_mask
[2] |= ((u64
)fsp
->m_u
.tcp_ip4_spec
.tos
<<
7399 TCAM_V4KEY2_TOS_SHIFT
);
7400 switch (fsp
->flow_type
) {
7404 sport
= be16_to_cpu(fsp
->h_u
.tcp_ip4_spec
.psrc
);
7405 spm
= be16_to_cpu(fsp
->m_u
.tcp_ip4_spec
.psrc
);
7406 dport
= be16_to_cpu(fsp
->h_u
.tcp_ip4_spec
.pdst
);
7407 dpm
= be16_to_cpu(fsp
->m_u
.tcp_ip4_spec
.pdst
);
7409 tp
->key
[2] |= (((u64
)sport
<< 16) | dport
);
7410 tp
->key_mask
[2] |= (((u64
)spm
<< 16) | dpm
);
7411 niu_ethflow_to_l3proto(fsp
->flow_type
, &pid
);
7415 spi
= be32_to_cpu(fsp
->h_u
.ah_ip4_spec
.spi
);
7416 spim
= be32_to_cpu(fsp
->m_u
.ah_ip4_spec
.spi
);
7419 tp
->key_mask
[2] |= spim
;
7420 niu_ethflow_to_l3proto(fsp
->flow_type
, &pid
);
7423 spi
= be32_to_cpu(fsp
->h_u
.usr_ip4_spec
.l4_4_bytes
);
7424 spim
= be32_to_cpu(fsp
->m_u
.usr_ip4_spec
.l4_4_bytes
);
7427 tp
->key_mask
[2] |= spim
;
7428 pid
= fsp
->h_u
.usr_ip4_spec
.proto
;
7434 tp
->key
[2] |= ((u64
)pid
<< TCAM_V4KEY2_PROTO_SHIFT
);
7436 tp
->key_mask
[2] |= TCAM_V4KEY2_PROTO
;
7440 static int niu_add_ethtool_tcam_entry(struct niu
*np
,
7441 struct ethtool_rxnfc
*nfc
)
7443 struct niu_parent
*parent
= np
->parent
;
7444 struct niu_tcam_entry
*tp
;
7445 struct ethtool_rx_flow_spec
*fsp
= &nfc
->fs
;
7446 struct niu_rdc_tables
*rdc_table
= &parent
->rdc_group_cfg
[np
->port
];
7447 int l2_rdc_table
= rdc_table
->first_table_num
;
7450 unsigned long flags
;
7455 idx
= nfc
->fs
.location
;
7456 if (idx
>= tcam_get_size(np
))
7459 if (fsp
->flow_type
== IP_USER_FLOW
) {
7461 int add_usr_cls
= 0;
7463 struct ethtool_usrip4_spec
*uspec
= &fsp
->h_u
.usr_ip4_spec
;
7464 struct ethtool_usrip4_spec
*umask
= &fsp
->m_u
.usr_ip4_spec
;
7466 niu_lock_parent(np
, flags
);
7468 for (i
= 0; i
< NIU_L3_PROG_CLS
; i
++) {
7469 if (parent
->l3_cls
[i
]) {
7470 if (uspec
->proto
== parent
->l3_cls_pid
[i
]) {
7471 class = parent
->l3_cls
[i
];
7472 parent
->l3_cls_refcnt
[i
]++;
7477 /* Program new user IP class */
7480 class = CLASS_CODE_USER_PROG1
;
7483 class = CLASS_CODE_USER_PROG2
;
7486 class = CLASS_CODE_USER_PROG3
;
7489 class = CLASS_CODE_USER_PROG4
;
7494 if (uspec
->ip_ver
== ETH_RX_NFC_IP6
)
7496 ret
= tcam_user_ip_class_set(np
, class, ipv6
,
7503 ret
= tcam_user_ip_class_enable(np
, class, 1);
7506 parent
->l3_cls
[i
] = class;
7507 parent
->l3_cls_pid
[i
] = uspec
->proto
;
7508 parent
->l3_cls_refcnt
[i
]++;
7514 netdev_info(np
->dev
, "niu%d: %s(): Could not find/insert class for pid %d\n",
7515 parent
->index
, __func__
, uspec
->proto
);
7519 niu_unlock_parent(np
, flags
);
7521 if (!niu_ethflow_to_class(fsp
->flow_type
, &class)) {
7526 niu_lock_parent(np
, flags
);
7528 idx
= tcam_get_index(np
, idx
);
7529 tp
= &parent
->tcam
[idx
];
7531 memset(tp
, 0, sizeof(*tp
));
7533 /* fill in the tcam key and mask */
7534 switch (fsp
->flow_type
) {
7540 niu_get_tcamkey_from_ip4fs(fsp
, tp
, l2_rdc_table
, class);
7547 /* Not yet implemented */
7548 netdev_info(np
->dev
, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
7549 parent
->index
, __func__
, fsp
->flow_type
);
7553 if (fsp
->h_u
.usr_ip4_spec
.ip_ver
== ETH_RX_NFC_IP4
) {
7554 niu_get_tcamkey_from_ip4fs(fsp
, tp
, l2_rdc_table
,
7557 /* Not yet implemented */
7558 netdev_info(np
->dev
, "niu%d: In %s(): usr flow for IPv6 not implemented\n",
7559 parent
->index
, __func__
);
7565 netdev_info(np
->dev
, "niu%d: In %s(): Unknown flow type %d\n",
7566 parent
->index
, __func__
, fsp
->flow_type
);
7571 /* fill in the assoc data */
7572 if (fsp
->ring_cookie
== RX_CLS_FLOW_DISC
) {
7573 tp
->assoc_data
= TCAM_ASSOCDATA_DISC
;
7575 if (fsp
->ring_cookie
>= np
->num_rx_rings
) {
7576 netdev_info(np
->dev
, "niu%d: In %s(): Invalid RX ring %lld\n",
7577 parent
->index
, __func__
,
7578 (long long)fsp
->ring_cookie
);
7582 tp
->assoc_data
= (TCAM_ASSOCDATA_TRES_USE_OFFSET
|
7583 (fsp
->ring_cookie
<<
7584 TCAM_ASSOCDATA_OFFSET_SHIFT
));
7587 err
= tcam_write(np
, idx
, tp
->key
, tp
->key_mask
);
7592 err
= tcam_assoc_write(np
, idx
, tp
->assoc_data
);
7598 /* validate the entry */
7600 np
->clas
.tcam_valid_entries
++;
7602 niu_unlock_parent(np
, flags
);
7607 static int niu_del_ethtool_tcam_entry(struct niu
*np
, u32 loc
)
7609 struct niu_parent
*parent
= np
->parent
;
7610 struct niu_tcam_entry
*tp
;
7612 unsigned long flags
;
7616 if (loc
>= tcam_get_size(np
))
7619 niu_lock_parent(np
, flags
);
7621 idx
= tcam_get_index(np
, loc
);
7622 tp
= &parent
->tcam
[idx
];
7624 /* if the entry is of a user defined class, then update*/
7625 class = (tp
->key
[0] & TCAM_V4KEY0_CLASS_CODE
) >>
7626 TCAM_V4KEY0_CLASS_CODE_SHIFT
;
7628 if (class >= CLASS_CODE_USER_PROG1
&& class <= CLASS_CODE_USER_PROG4
) {
7630 for (i
= 0; i
< NIU_L3_PROG_CLS
; i
++) {
7631 if (parent
->l3_cls
[i
] == class) {
7632 parent
->l3_cls_refcnt
[i
]--;
7633 if (!parent
->l3_cls_refcnt
[i
]) {
7635 ret
= tcam_user_ip_class_enable(np
,
7640 parent
->l3_cls
[i
] = 0;
7641 parent
->l3_cls_pid
[i
] = 0;
7646 if (i
== NIU_L3_PROG_CLS
) {
7647 netdev_info(np
->dev
, "niu%d: In %s(): Usr class 0x%llx not found\n",
7648 parent
->index
, __func__
,
7649 (unsigned long long)class);
7655 ret
= tcam_flush(np
, idx
);
7659 /* invalidate the entry */
7661 np
->clas
.tcam_valid_entries
--;
7663 niu_unlock_parent(np
, flags
);
7668 static int niu_set_nfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
7670 struct niu
*np
= netdev_priv(dev
);
7675 ret
= niu_set_hash_opts(np
, cmd
);
7677 case ETHTOOL_SRXCLSRLINS
:
7678 ret
= niu_add_ethtool_tcam_entry(np
, cmd
);
7680 case ETHTOOL_SRXCLSRLDEL
:
7681 ret
= niu_del_ethtool_tcam_entry(np
, cmd
->fs
.location
);
7691 static const struct {
7692 const char string
[ETH_GSTRING_LEN
];
7693 } niu_xmac_stat_keys
[] = {
7696 { "tx_fifo_errors" },
7697 { "tx_overflow_errors" },
7698 { "tx_max_pkt_size_errors" },
7699 { "tx_underflow_errors" },
7700 { "rx_local_faults" },
7701 { "rx_remote_faults" },
7702 { "rx_link_faults" },
7703 { "rx_align_errors" },
7715 { "rx_code_violations" },
7716 { "rx_len_errors" },
7717 { "rx_crc_errors" },
7718 { "rx_underflows" },
7720 { "pause_off_state" },
7721 { "pause_on_state" },
7722 { "pause_received" },
7725 #define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys)
7727 static const struct {
7728 const char string
[ETH_GSTRING_LEN
];
7729 } niu_bmac_stat_keys
[] = {
7730 { "tx_underflow_errors" },
7731 { "tx_max_pkt_size_errors" },
7736 { "rx_align_errors" },
7737 { "rx_crc_errors" },
7738 { "rx_len_errors" },
7739 { "pause_off_state" },
7740 { "pause_on_state" },
7741 { "pause_received" },
7744 #define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys)
7746 static const struct {
7747 const char string
[ETH_GSTRING_LEN
];
7748 } niu_rxchan_stat_keys
[] = {
7756 #define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys)
7758 static const struct {
7759 const char string
[ETH_GSTRING_LEN
];
7760 } niu_txchan_stat_keys
[] = {
7767 #define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys)
7769 static void niu_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
7771 struct niu
*np
= netdev_priv(dev
);
7774 if (stringset
!= ETH_SS_STATS
)
7777 if (np
->flags
& NIU_FLAGS_XMAC
) {
7778 memcpy(data
, niu_xmac_stat_keys
,
7779 sizeof(niu_xmac_stat_keys
));
7780 data
+= sizeof(niu_xmac_stat_keys
);
7782 memcpy(data
, niu_bmac_stat_keys
,
7783 sizeof(niu_bmac_stat_keys
));
7784 data
+= sizeof(niu_bmac_stat_keys
);
7786 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
7787 memcpy(data
, niu_rxchan_stat_keys
,
7788 sizeof(niu_rxchan_stat_keys
));
7789 data
+= sizeof(niu_rxchan_stat_keys
);
7791 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
7792 memcpy(data
, niu_txchan_stat_keys
,
7793 sizeof(niu_txchan_stat_keys
));
7794 data
+= sizeof(niu_txchan_stat_keys
);
7798 static int niu_get_sset_count(struct net_device
*dev
, int stringset
)
7800 struct niu
*np
= netdev_priv(dev
);
7802 if (stringset
!= ETH_SS_STATS
)
7805 return ((np
->flags
& NIU_FLAGS_XMAC
?
7806 NUM_XMAC_STAT_KEYS
:
7807 NUM_BMAC_STAT_KEYS
) +
7808 (np
->num_rx_rings
* NUM_RXCHAN_STAT_KEYS
) +
7809 (np
->num_tx_rings
* NUM_TXCHAN_STAT_KEYS
));
7812 static void niu_get_ethtool_stats(struct net_device
*dev
,
7813 struct ethtool_stats
*stats
, u64
*data
)
7815 struct niu
*np
= netdev_priv(dev
);
7818 niu_sync_mac_stats(np
);
7819 if (np
->flags
& NIU_FLAGS_XMAC
) {
7820 memcpy(data
, &np
->mac_stats
.xmac
,
7821 sizeof(struct niu_xmac_stats
));
7822 data
+= (sizeof(struct niu_xmac_stats
) / sizeof(u64
));
7824 memcpy(data
, &np
->mac_stats
.bmac
,
7825 sizeof(struct niu_bmac_stats
));
7826 data
+= (sizeof(struct niu_bmac_stats
) / sizeof(u64
));
7828 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
7829 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
7831 niu_sync_rx_discard_stats(np
, rp
, 0);
7833 data
[0] = rp
->rx_channel
;
7834 data
[1] = rp
->rx_packets
;
7835 data
[2] = rp
->rx_bytes
;
7836 data
[3] = rp
->rx_dropped
;
7837 data
[4] = rp
->rx_errors
;
7840 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
7841 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
7843 data
[0] = rp
->tx_channel
;
7844 data
[1] = rp
->tx_packets
;
7845 data
[2] = rp
->tx_bytes
;
7846 data
[3] = rp
->tx_errors
;
7851 static u64
niu_led_state_save(struct niu
*np
)
7853 if (np
->flags
& NIU_FLAGS_XMAC
)
7854 return nr64_mac(XMAC_CONFIG
);
7856 return nr64_mac(BMAC_XIF_CONFIG
);
7859 static void niu_led_state_restore(struct niu
*np
, u64 val
)
7861 if (np
->flags
& NIU_FLAGS_XMAC
)
7862 nw64_mac(XMAC_CONFIG
, val
);
7864 nw64_mac(BMAC_XIF_CONFIG
, val
);
7867 static void niu_force_led(struct niu
*np
, int on
)
7871 if (np
->flags
& NIU_FLAGS_XMAC
) {
7873 bit
= XMAC_CONFIG_FORCE_LED_ON
;
7875 reg
= BMAC_XIF_CONFIG
;
7876 bit
= BMAC_XIF_CONFIG_LINK_LED
;
7879 val
= nr64_mac(reg
);
7887 static int niu_phys_id(struct net_device
*dev
, u32 data
)
7889 struct niu
*np
= netdev_priv(dev
);
7893 if (!netif_running(dev
))
7899 orig_led_state
= niu_led_state_save(np
);
7900 for (i
= 0; i
< (data
* 2); i
++) {
7901 int on
= ((i
% 2) == 0);
7903 niu_force_led(np
, on
);
7905 if (msleep_interruptible(500))
7908 niu_led_state_restore(np
, orig_led_state
);
static const struct ethtool_ops niu_ethtool_ops = {
	.get_drvinfo		= niu_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= niu_get_msglevel,
	.set_msglevel		= niu_set_msglevel,
	.nway_reset		= niu_nway_reset,
	.get_eeprom_len		= niu_get_eeprom_len,
	.get_eeprom		= niu_get_eeprom,
	.get_settings		= niu_get_settings,
	.set_settings		= niu_set_settings,
	.get_strings		= niu_get_strings,
	.get_sset_count		= niu_get_sset_count,
	.get_ethtool_stats	= niu_get_ethtool_stats,
	.phys_id		= niu_phys_id,
	.get_rxnfc		= niu_get_nfc,
	.set_rxnfc		= niu_set_nfc,
};
*np
, struct niu_parent
*parent
,
7934 if (ldg
< NIU_LDG_MIN
|| ldg
> NIU_LDG_MAX
)
7936 if (ldn
< 0 || ldn
> LDN_MAX
)
7939 parent
->ldg_map
[ldn
] = ldg
;
7941 if (np
->parent
->plat_type
== PLAT_TYPE_NIU
) {
7942 /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
7943 * the firmware, and we're not supposed to change them.
7944 * Validate the mapping, because if it's wrong we probably
7945 * won't get any interrupts and that's painful to debug.
7947 if (nr64(LDG_NUM(ldn
)) != ldg
) {
7948 dev_err(np
->device
, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
7950 (unsigned long long) nr64(LDG_NUM(ldn
)));
7954 nw64(LDG_NUM(ldn
), ldg
);
7959 static int niu_set_ldg_timer_res(struct niu
*np
, int res
)
7961 if (res
< 0 || res
> LDG_TIMER_RES_VAL
)
7965 nw64(LDG_TIMER_RES
, res
);
7970 static int niu_set_ldg_sid(struct niu
*np
, int ldg
, int func
, int vector
)
7972 if ((ldg
< NIU_LDG_MIN
|| ldg
> NIU_LDG_MAX
) ||
7973 (func
< 0 || func
> 3) ||
7974 (vector
< 0 || vector
> 0x1f))
7977 nw64(SID(ldg
), (func
<< SID_FUNC_SHIFT
) | vector
);
7982 static int __devinit
niu_pci_eeprom_read(struct niu
*np
, u32 addr
)
7984 u64 frame
, frame_base
= (ESPC_PIO_STAT_READ_START
|
7985 (addr
<< ESPC_PIO_STAT_ADDR_SHIFT
));
7988 if (addr
> (ESPC_PIO_STAT_ADDR
>> ESPC_PIO_STAT_ADDR_SHIFT
))
7992 nw64(ESPC_PIO_STAT
, frame
);
7996 frame
= nr64(ESPC_PIO_STAT
);
7997 if (frame
& ESPC_PIO_STAT_READ_END
)
8000 if (!(frame
& ESPC_PIO_STAT_READ_END
)) {
8001 dev_err(np
->device
, "EEPROM read timeout frame[%llx]\n",
8002 (unsigned long long) frame
);
8007 nw64(ESPC_PIO_STAT
, frame
);
8011 frame
= nr64(ESPC_PIO_STAT
);
8012 if (frame
& ESPC_PIO_STAT_READ_END
)
8015 if (!(frame
& ESPC_PIO_STAT_READ_END
)) {
8016 dev_err(np
->device
, "EEPROM read timeout frame[%llx]\n",
8017 (unsigned long long) frame
);
8021 frame
= nr64(ESPC_PIO_STAT
);
8022 return (frame
& ESPC_PIO_STAT_DATA
) >> ESPC_PIO_STAT_DATA_SHIFT
;
8025 static int __devinit
niu_pci_eeprom_read16(struct niu
*np
, u32 off
)
8027 int err
= niu_pci_eeprom_read(np
, off
);
8033 err
= niu_pci_eeprom_read(np
, off
+ 1);
8036 val
|= (err
& 0xff);
8041 static int __devinit
niu_pci_eeprom_read16_swp(struct niu
*np
, u32 off
)
8043 int err
= niu_pci_eeprom_read(np
, off
);
8050 err
= niu_pci_eeprom_read(np
, off
+ 1);
8054 val
|= (err
& 0xff) << 8;
8059 static int __devinit
niu_pci_vpd_get_propname(struct niu
*np
,
8066 for (i
= 0; i
< namebuf_len
; i
++) {
8067 int err
= niu_pci_eeprom_read(np
, off
+ i
);
8074 if (i
>= namebuf_len
)
8080 static void __devinit
niu_vpd_parse_version(struct niu
*np
)
8082 struct niu_vpd
*vpd
= &np
->vpd
;
8083 int len
= strlen(vpd
->version
) + 1;
8084 const char *s
= vpd
->version
;
8087 for (i
= 0; i
< len
- 5; i
++) {
8088 if (!strncmp(s
+ i
, "FCode ", 6))
8095 sscanf(s
, "%d.%d", &vpd
->fcode_major
, &vpd
->fcode_minor
);
8097 netif_printk(np
, probe
, KERN_DEBUG
, np
->dev
,
8098 "VPD_SCAN: FCODE major(%d) minor(%d)\n",
8099 vpd
->fcode_major
, vpd
->fcode_minor
);
8100 if (vpd
->fcode_major
> NIU_VPD_MIN_MAJOR
||
8101 (vpd
->fcode_major
== NIU_VPD_MIN_MAJOR
&&
8102 vpd
->fcode_minor
>= NIU_VPD_MIN_MINOR
))
8103 np
->flags
|= NIU_FLAGS_VPD_VALID
;
/* ESPC_PIO_EN_ENABLE must be set */
static int __devinit niu_pci_vpd_scan_props(struct niu *np,
					    u32 start, u32 end)
{
	unsigned int found_mask = 0;
#define FOUND_MASK_MODEL	0x00000001
#define FOUND_MASK_BMODEL	0x00000002
#define FOUND_MASK_VERS		0x00000004
#define FOUND_MASK_MAC		0x00000008
#define FOUND_MASK_NMAC		0x00000010
#define FOUND_MASK_PHY		0x00000020
#define FOUND_MASK_ALL		0x0000003f

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
	while (start < end) {
		int len, err, instance, type, prop_len;
		char namebuf[64];
		u8 *prop_buf;
		int max_len;

		if (found_mask == FOUND_MASK_ALL) {
			niu_vpd_parse_version(np);
			return 1;
		}

		err = niu_pci_eeprom_read(np, start + 2);
		if (err < 0)
			return err;
		len = err;
		start += 3;

		instance = niu_pci_eeprom_read(np, start);
		type = niu_pci_eeprom_read(np, start + 3);
		prop_len = niu_pci_eeprom_read(np, start + 4);
		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
		if (err < 0)
			return err;

		prop_buf = NULL;
		max_len = 0;
		if (!strcmp(namebuf, "model")) {
			prop_buf = np->vpd.model;
			max_len = NIU_VPD_MODEL_MAX;
			found_mask |= FOUND_MASK_MODEL;
		} else if (!strcmp(namebuf, "board-model")) {
			prop_buf = np->vpd.board_model;
			max_len = NIU_VPD_BD_MODEL_MAX;
			found_mask |= FOUND_MASK_BMODEL;
		} else if (!strcmp(namebuf, "version")) {
			prop_buf = np->vpd.version;
			max_len = NIU_VPD_VERSION_MAX;
			found_mask |= FOUND_MASK_VERS;
		} else if (!strcmp(namebuf, "local-mac-address")) {
			prop_buf = np->vpd.local_mac;
			max_len = ETH_ALEN;
			found_mask |= FOUND_MASK_MAC;
		} else if (!strcmp(namebuf, "num-mac-addresses")) {
			prop_buf = &np->vpd.mac_num;
			max_len = 1;
			found_mask |= FOUND_MASK_NMAC;
		} else if (!strcmp(namebuf, "phy-type")) {
			prop_buf = np->vpd.phy_type;
			max_len = NIU_VPD_PHY_TYPE_MAX;
			found_mask |= FOUND_MASK_PHY;
		}

		if (max_len && prop_len > max_len) {
			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
			return -EINVAL;
		}

		if (prop_buf) {
			u32 off = start + 5 + err;
			int i;

			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
				     namebuf, prop_len);
			for (i = 0; i < prop_len; i++)
				*prop_buf++ = niu_pci_eeprom_read(np, off + i);
		}

		start += len;
	}

	return 0;
}
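/* Layout the scanner above relies on, stated explicitly (derived from the
 * offsets it reads, relative to 'start' before the "start += 3"): record
 * length at +2, instance at +3+0, property type at +3+3, property length
 * at +3+4, then a NUL-terminated property name at +3+5 followed by the
 * property data.  Only the six well-known names checked above are copied
 * out; every other record is skipped by advancing 'start' by its length.
 */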
/* ESPC_PIO_EN_ENABLE must be set */
static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
{
	u32 offset;
	int err;

	err = niu_pci_eeprom_read16_swp(np, start + 1);
	if (err < 0)
		return;

	offset = err + 3;

	while (start + offset < ESPC_EEPROM_SIZE) {
		u32 here = start + offset;
		u32 end;

		err = niu_pci_eeprom_read(np, here);
		if (err != 0x90)
			return;

		err = niu_pci_eeprom_read16_swp(np, here + 1);
		if (err < 0)
			return;

		here = start + offset + 3;
		end = start + offset + err;

		offset += err;

		err = niu_pci_vpd_scan_props(np, here, end);
		if (err < 0 || err == 1)
			return;
	}
}
/* ESPC_PIO_EN_ENABLE must be set */
static u32 __devinit niu_pci_vpd_offset(struct niu *np)
{
	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
	int err;

	while (start < end) {
		ret = start;

		/* ROM header signature?  */
		err = niu_pci_eeprom_read16(np, start +  0);
		if (err != 0x55aa)
			return 0;

		/* Apply offset to PCI data structure.  */
		err = niu_pci_eeprom_read16(np, start + 23);
		if (err < 0)
			return 0;
		start += err;

		/* Check for "PCIR" signature.  */
		err = niu_pci_eeprom_read16(np, start +  0);
		if (err != 0x5043)
			return 0;
		err = niu_pci_eeprom_read16(np, start +  2);
		if (err != 0x4952)
			return 0;

		/* Check for OBP image type.  */
		err = niu_pci_eeprom_read(np, start + 20);
		if (err < 0)
			return 0;
		if (err != 0x01) {
			err = niu_pci_eeprom_read(np, ret + 2);
			if (err < 0)
				return 0;

			start = ret + (err * 512);
			continue;
		}

		err = niu_pci_eeprom_read16_swp(np, start + 8);
		if (err < 0)
			return err;
		ret += err;

		err = niu_pci_eeprom_read(np, ret + 0);
		if (err != 0x82)
			return 0;

		return ret;
	}

	return 0;
}
static int __devinit niu_phy_type_prop_decode(struct niu *np,
					      const char *phy_prop)
{
	if (!strcmp(phy_prop, "mif")) {
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
	} else if (!strcmp(phy_prop, "xgf")) {
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "pcs")) {
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
	} else if (!strcmp(phy_prop, "xgc")) {
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
		/* 10G Serdes or 1G Serdes, default to 10G */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else {
		return -EINVAL;
	}
	return 0;
}
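/* Summary of the decoder above -- the OBP/VPD "phy-type" strings map to
 * link configuration as follows:
 *
 *	"mif"		1G copper,	MAC_XCVR_MII
 *	"xgf"		10G fiber,	MAC_XCVR_XPCS
 *	"pcs"		1G fiber,	MAC_XCVR_PCS
 *	"xgc"		10G copper,	MAC_XCVR_XPCS
 *	"xgsd"/"gsd"	SERDES (defaults to 10G), MAC_XCVR_XPCS
 *
 * Any other string is rejected with -EINVAL, which the callers treat as
 * "VPD not usable" and fall back to the SPROM probe.
 */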
static int niu_pci_vpd_get_nports(struct niu *np)
{
	int ports = 0;

	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
		ports = 4;
	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
		ports = 2;
	}

	return ports;
}
static void __devinit niu_pci_vpd_validate(struct niu *np)
{
	struct net_device *dev = np->dev;
	struct niu_vpd *vpd = &np->vpd;
	u8 val8;

	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");

		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_PCS;
		if (np->port > 1) {
			np->flags |= NIU_FLAGS_FIBER;
			np->flags &= ~NIU_FLAGS_10G;
		}
		if (np->flags & NIU_FLAGS_10G)
			np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, "Illegal phy string [%s]\n",
			np->vpd.phy_type);
		dev_err(np->device, "Falling back to SPROM\n");
		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);

	val8 = dev->perm_addr[5];
	dev->perm_addr[5] += np->port;
	if (dev->perm_addr[5] < val8)
		dev->perm_addr[4]++;

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
}
static int __devinit niu_pci_probe_sprom(struct niu *np)
{
	struct net_device *dev = np->dev;
	int len, i;
	u64 val, sum;
	u8 val8;

	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
	len = val / 4;

	np->eeprom_len = len;

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Image size %llu\n", (unsigned long long)val);

	sum = 0;
	for (i = 0; i < len; i++) {
		val = nr64(ESPC_NCR(i));
		sum += (val >>  0) & 0xff;
		sum += (val >>  8) & 0xff;
		sum += (val >> 16) & 0xff;
		sum += (val >> 24) & 0xff;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
	if ((sum & 0xff) != 0xab) {
		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
		return -EINVAL;
	}

	val = nr64(ESPC_PHY_TYPE);
	switch (np->port) {
	case 0:
		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
			ESPC_PHY_TYPE_PORT0_SHIFT;
		break;
	case 1:
		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
			ESPC_PHY_TYPE_PORT1_SHIFT;
		break;
	case 2:
		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
			ESPC_PHY_TYPE_PORT2_SHIFT;
		break;
	case 3:
		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
			ESPC_PHY_TYPE_PORT3_SHIFT;
		break;
	default:
		dev_err(np->device, "Bogus port number %u\n",
			np->port);
		return -EINVAL;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: PHY type %x\n", val8);

	switch (val8) {
	case ESPC_PHY_TYPE_1G_COPPER:
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
		break;

	case ESPC_PHY_TYPE_1G_FIBER:
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
		break;

	case ESPC_PHY_TYPE_10G_COPPER:
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	case ESPC_PHY_TYPE_10G_FIBER:
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	default:
		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
		return -EINVAL;
	}

	val = nr64(ESPC_MAC_ADDR0);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
	dev->perm_addr[0] = (val >>  0) & 0xff;
	dev->perm_addr[1] = (val >>  8) & 0xff;
	dev->perm_addr[2] = (val >> 16) & 0xff;
	dev->perm_addr[3] = (val >> 24) & 0xff;

	val = nr64(ESPC_MAC_ADDR1);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
	dev->perm_addr[4] = (val >>  0) & 0xff;
	dev->perm_addr[5] = (val >>  8) & 0xff;

	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
			dev->perm_addr);
		return -EINVAL;
	}

	val8 = dev->perm_addr[5];
	dev->perm_addr[5] += np->port;
	if (dev->perm_addr[5] < val8)
		dev->perm_addr[4]++;

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);

	val = nr64(ESPC_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));

		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.model[val] = '\0';

	val = nr64(ESPC_BD_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));

		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.board_model[val] = '\0';

	np->vpd.mac_num =
		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);

	return 0;
}
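/* Note on the address arithmetic above: both the VPD and SPROM paths
 * derive each port's MAC address by adding the port number to the last
 * octet of the base address, with a manual carry into octet 4 when the
 * addition wraps.  For a base address ending in ...:fe on port 3:
 *
 *	perm_addr[5] = 0xfe + 3 = 0x01 (wrapped), so perm_addr[4] += 1
 *
 * This presumably matches how the firmware hands out one consecutive MAC
 * address per port from the block advertised in "num-mac-addresses".
 */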
static int __devinit niu_get_and_validate_port(struct niu *np)
{
	struct niu_parent *parent = np->parent;

	if (np->port <= 1)
		np->flags |= NIU_FLAGS_XMAC;

	if (!parent->num_ports) {
		if (parent->plat_type == PLAT_TYPE_NIU) {
			parent->num_ports = 2;
		} else {
			parent->num_ports = niu_pci_vpd_get_nports(np);
			if (!parent->num_ports) {
				/* Fall back to SPROM as last resort.
				 * This will fail on most cards.
				 */
				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
					ESPC_NUM_PORTS_MACS_VAL;

				/* All of the current probing methods fail on
				 * Maramba on-board parts.
				 */
				if (!parent->num_ports)
					parent->num_ports = 4;
			}
		}
	}

	if (np->port >= parent->num_ports)
		return -ENODEV;

	return 0;
}
static int __devinit phy_record(struct niu_parent *parent,
				struct phy_probe_info *p,
				int dev_id_1, int dev_id_2, u8 phy_port,
				int type)
{
	u32 id = (dev_id_1 << 16) | dev_id_2;
	u8 idx;

	if (dev_id_1 < 0 || dev_id_2 < 0)
		return 0;
	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
			return 0;
	} else {
		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
			return 0;
	}

	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
		parent->index, id,
		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
		type == PHY_TYPE_PCS ? "PCS" : "MII",
		phy_port);

	if (p->cur[type] >= NIU_MAX_PORTS) {
		pr_err("Too many PHY ports\n");
		return -EINVAL;
	}
	idx = p->cur[type];
	p->phy_id[type][idx] = id;
	p->phy_port[type][idx] = phy_port;
	p->cur[type] = idx + 1;

	return 0;
}
static int __devinit port_has_10g(struct phy_probe_info *p, int port)
{
	int i;

	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
			return 1;
	}
	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
		if (p->phy_port[PHY_TYPE_PCS][i] == port)
			return 1;
	}

	return 0;
}

static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
{
	int port, cnt;

	cnt = 0;
	*lowest = 32;
	for (port = 8; port < 32; port++) {
		if (port_has_10g(p, port)) {
			if (!cnt)
				*lowest = port;
			cnt++;
		}
	}

	return cnt;
}

static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
{
	*lowest = 32;
	if (p->cur[PHY_TYPE_MII])
		*lowest = p->phy_port[PHY_TYPE_MII][0];

	return p->cur[PHY_TYPE_MII];
}
static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
{
	int num_ports = parent->num_ports;
	int i;

	for (i = 0; i < num_ports; i++) {
		parent->rxchan_per_port[i] = (16 / num_ports);
		parent->txchan_per_port[i] = (16 / num_ports);

		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
	}
}
static void __devinit niu_divide_channels(struct niu_parent *parent,
					  int num_10g, int num_1g)
{
	int num_ports = parent->num_ports;
	int rx_chans_per_10g, rx_chans_per_1g;
	int tx_chans_per_10g, tx_chans_per_1g;
	int i, tot_rx, tot_tx;

	if (!num_10g || !num_1g) {
		rx_chans_per_10g = rx_chans_per_1g =
			(NIU_NUM_RXCHAN / num_ports);
		tx_chans_per_10g = tx_chans_per_1g =
			(NIU_NUM_TXCHAN / num_ports);
	} else {
		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
		rx_chans_per_10g = (NIU_NUM_RXCHAN -
				    (rx_chans_per_1g * num_1g)) /
			num_10g;

		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
		tx_chans_per_10g = (NIU_NUM_TXCHAN -
				    (tx_chans_per_1g * num_1g)) /
			num_10g;
	}

	tot_rx = tot_tx = 0;
	for (i = 0; i < num_ports; i++) {
		int type = phy_decode(parent->port_phy, i);

		if (type == PORT_TYPE_10G) {
			parent->rxchan_per_port[i] = rx_chans_per_10g;
			parent->txchan_per_port[i] = tx_chans_per_10g;
		} else {
			parent->rxchan_per_port[i] = rx_chans_per_1g;
			parent->txchan_per_port[i] = tx_chans_per_1g;
		}
		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
		tot_rx += parent->rxchan_per_port[i];
		tot_tx += parent->txchan_per_port[i];
	}

	if (tot_rx > NIU_NUM_RXCHAN) {
		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
		       parent->index, tot_rx);
		for (i = 0; i < num_ports; i++)
			parent->rxchan_per_port[i] = 1;
	}
	if (tot_tx > NIU_NUM_TXCHAN) {
		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
		       parent->index, tot_tx);
		for (i = 0; i < num_ports; i++)
			parent->txchan_per_port[i] = 1;
	}
	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
		pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
			   parent->index, tot_rx, tot_tx);
	}
}
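/* Worked example for the split above (values assume the usual
 * NIU_NUM_RXCHAN = 16 and NIU_NUM_TXCHAN = 24): for a mixed config of
 * two 10G and two 1G ports,
 *
 *	rx_chans_per_1g  = 16 / 8            = 2
 *	rx_chans_per_10g = (16 - 2 * 2) / 2  = 6
 *	tx_chans_per_1g  = 24 / 6            = 4
 *	tx_chans_per_10g = (24 - 4 * 2) / 2  = 8
 *
 * i.e. the 10G ports get the bulk of the DMA channels, and the
 * "wasted channels" warning at the end only fires when integer
 * division leaves some channels unassigned.
 */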
static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
					    int num_10g, int num_1g)
{
	int i, num_ports = parent->num_ports;
	int rdc_group, rdc_groups_per_port;
	int rdc_channel_base;

	rdc_group = 0;
	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;

	rdc_channel_base = 0;

	for (i = 0; i < num_ports; i++) {
		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
		int grp, num_channels = parent->rxchan_per_port[i];
		int this_channel_offset;

		tp->first_table_num = rdc_group;
		tp->num_tables = rdc_groups_per_port;
		this_channel_offset = 0;
		for (grp = 0; grp < tp->num_tables; grp++) {
			struct rdc_table *rt = &tp->tables[grp];
			int slot;

			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
				parent->index, i, tp->first_table_num + grp);
			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
				rt->rxdma_channel[slot] =
					rdc_channel_base + this_channel_offset;

				pr_cont("%d ", rt->rxdma_channel[slot]);

				if (++this_channel_offset == num_channels)
					this_channel_offset = 0;
			}
			pr_cont("]\n");
		}

		parent->rdc_default[i] = rdc_channel_base;

		rdc_channel_base += num_channels;
		rdc_group += rdc_groups_per_port;
	}
}
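/* Illustration of the RDC table fill above: each of a port's RDC tables
 * is populated by striping that port's RX channels round-robin across
 * the table slots.  E.g. with rdc_channel_base = 4 and num_channels = 2,
 * every table for that port reads
 *
 *	[ 4 5 4 5 4 5 ... ]
 *
 * and parent->rdc_default[] records channel 4 as the port's default RDC.
 */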
static int __devinit fill_phy_probe_info(struct niu *np,
					 struct niu_parent *parent,
					 struct phy_probe_info *info)
{
	unsigned long flags;
	int port, err;

	memset(info, 0, sizeof(*info));

	/* Port 0 to 7 are reserved for onboard Serdes, probe the rest.  */
	niu_lock_parent(np, flags);
	err = 0;
	for (port = 8; port < 32; port++) {
		int dev_id_1, dev_id_2;

		dev_id_1 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PMA_PMD);
		if (err)
			break;
		dev_id_1 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PCS);
		if (err)
			break;
		dev_id_1 = mii_read(np, port, MII_PHYSID1);
		dev_id_2 = mii_read(np, port, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_MII);
		if (err)
			break;
	}
	niu_unlock_parent(np, flags);

	return err;
}
static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
{
	struct phy_probe_info *info = &parent->phy_probe_info;
	int lowest_10g, lowest_1g;
	int num_10g, num_1g;
	u32 val;
	int err;

	num_10g = num_1g = 0;

	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		num_10g = 0;
		num_1g = 2;
		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
		parent->num_ports = 4;
		val = (phy_encode(PORT_TYPE_1G, 0) |
		       phy_encode(PORT_TYPE_1G, 1) |
		       phy_encode(PORT_TYPE_1G, 2) |
		       phy_encode(PORT_TYPE_1G, 3));
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		num_10g = 2;
		num_1g = 0;
		parent->num_ports = 2;
		val = (phy_encode(PORT_TYPE_10G, 0) |
		       phy_encode(PORT_TYPE_10G, 1));
	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
		   (parent->plat_type == PLAT_TYPE_NIU)) {
		/* this is the Monza case */
		if (np->flags & NIU_FLAGS_10G) {
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
		} else {
			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1));
		}
	} else {
		err = fill_phy_probe_info(np, parent, info);
		if (err)
			return err;

		num_10g = count_10g_ports(info, &lowest_10g);
		num_1g = count_1g_ports(info, &lowest_1g);

		switch ((num_10g << 4) | num_1g) {
		case 0x24:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			/* fallthru */
		case 0x22:
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
			break;

		case 0x20:
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
			break;

		case 0x10:
			val = phy_encode(PORT_TYPE_10G, np->port);
			break;

		case 0x14:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			/* fallthru */
		case 0x13:
			if ((lowest_10g & 0x7) == 0)
				val = (phy_encode(PORT_TYPE_10G, 0) |
				       phy_encode(PORT_TYPE_1G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
			else
				val = (phy_encode(PORT_TYPE_1G, 0) |
				       phy_encode(PORT_TYPE_10G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
			break;

		case 0x04:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
			break;

		default:
			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
			       num_10g, num_1g);
			return -EINVAL;
		}
	}

	parent->port_phy = val;

	if (parent->plat_type == PLAT_TYPE_NIU)
		niu_n2_divide_channels(parent);
	else
		niu_divide_channels(parent, num_10g, num_1g);

	niu_divide_rdc_groups(parent, num_10g, num_1g);

	return 0;

unknown_vg_1g_port:
	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
	return -EINVAL;
}
static int __devinit niu_probe_ports(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int err, i;

	if (parent->port_phy == PORT_PHY_UNKNOWN) {
		err = walk_phys(np, parent);
		if (err)
			return err;

		niu_set_ldg_timer_res(np, 2);
		for (i = 0; i <= LDN_MAX; i++)
			niu_ldn_irq_enable(np, i, 0);
	}

	if (parent->port_phy == PORT_PHY_INVALID)
		return -EINVAL;

	return 0;
}
static int __devinit niu_classifier_swstate_init(struct niu *np)
{
	struct niu_classifier *cp = &np->clas;

	cp->tcam_top = (u16) np->port;
	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
	cp->h1_init = 0xffffffff;
	cp->h2_init = 0xffff;

	return fflp_early_init(np);
}
static void __devinit niu_link_config_init(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;

	lp->advertising = (ADVERTISED_10baseT_Half |
			   ADVERTISED_10baseT_Full |
			   ADVERTISED_100baseT_Half |
			   ADVERTISED_100baseT_Full |
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full |
			   ADVERTISED_10000baseT_Full |
			   ADVERTISED_Autoneg);
	lp->speed = lp->active_speed = SPEED_INVALID;
	lp->duplex = DUPLEX_FULL;
	lp->active_duplex = DUPLEX_INVALID;
#if 0
	lp->loopback_mode = LOOPBACK_MAC;
	lp->active_speed = SPEED_10000;
	lp->active_duplex = DUPLEX_FULL;
#else
	lp->loopback_mode = LOOPBACK_DISABLED;
#endif
}
static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
{
	switch (np->port) {
	case 0:
		np->mac_regs = np->regs + XMAC_PORT0_OFF;
		np->ipp_off  = 0x00000;
		np->pcs_off  = 0x04000;
		np->xpcs_off = 0x02000;
		break;

	case 1:
		np->mac_regs = np->regs + XMAC_PORT1_OFF;
		np->ipp_off  = 0x08000;
		np->pcs_off  = 0x0a000;
		np->xpcs_off = 0x08000;
		break;

	case 2:
		np->mac_regs = np->regs + BMAC_PORT2_OFF;
		np->ipp_off  = 0x04000;
		np->pcs_off  = 0x0e000;
		np->xpcs_off = ~0UL;
		break;

	case 3:
		np->mac_regs = np->regs + BMAC_PORT3_OFF;
		np->ipp_off  = 0x0c000;
		np->pcs_off  = 0x12000;
		np->xpcs_off = ~0UL;
		break;

	default:
		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
		return -EINVAL;
	}

	return 0;
}
static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
{
	struct msix_entry msi_vec[NIU_NUM_LDG];
	struct niu_parent *parent = np->parent;
	struct pci_dev *pdev = np->pdev;
	int i, num_irqs, err;
	u8 first_ldg;

	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
		ldg_num_map[i] = first_ldg + i;

	num_irqs = (parent->rxchan_per_port[np->port] +
		    parent->txchan_per_port[np->port] +
		    (np->port == 0 ? 3 : 1));
	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));

retry:
	for (i = 0; i < num_irqs; i++) {
		msi_vec[i].vector = 0;
		msi_vec[i].entry = i;
	}

	err = pci_enable_msix(pdev, msi_vec, num_irqs);
	if (err < 0) {
		np->flags &= ~NIU_FLAGS_MSIX;
		return;
	}
	if (err > 0) {
		num_irqs = err;
		goto retry;
	}

	np->flags |= NIU_FLAGS_MSIX;
	for (i = 0; i < num_irqs; i++)
		np->ldg[i].irq = msi_vec[i].vector;
	np->num_ldg = num_irqs;
}
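/* Note on the vector budget above: the MSI-X request asks for one vector
 * per RX channel, one per TX channel, plus the extra logical devices this
 * port owns (MAC on every port; MIF and SYSERR only exist on port 0,
 * hence the "np->port == 0 ? 3 : 1" term).  When pci_enable_msix() cannot
 * grant the full request it either suggests a smaller count, which is
 * retried, or fails outright, in which case NIU_FLAGS_MSIX is cleared and
 * the device falls back to the single legacy interrupt.
 */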
static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
{
#ifdef CONFIG_SPARC64
	struct of_device *op = np->op;
	const u32 *int_prop;
	int i;

	int_prop = of_get_property(op->node, "interrupts", NULL);
	if (!int_prop)
		return -ENODEV;

	for (i = 0; i < op->num_irqs; i++) {
		ldg_num_map[i] = int_prop[i];
		np->ldg[i].irq = op->irqs[i];
	}

	np->num_ldg = op->num_irqs;

	return 0;
#else
	return -EINVAL;
#endif
}
static int __devinit niu_ldg_init(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	u8 ldg_num_map[NIU_NUM_LDG];
	int first_chan, num_chan;
	int i, err, ldg_rotor;
	u8 port;

	np->num_ldg = 1;
	np->ldg[0].irq = np->dev->irq;
	if (parent->plat_type == PLAT_TYPE_NIU) {
		err = niu_n2_irq_init(np, ldg_num_map);
		if (err)
			return err;
	} else
		niu_try_msix(np, ldg_num_map);

	port = np->port;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);

		lp->np = np;
		lp->ldg_num = ldg_num_map[i];
		lp->timer = 2; /* XXX */

		/* On N2 NIU the firmware has setup the SID mappings so they go
		 * to the correct values that will route the LDG to the proper
		 * interrupt in the NCU interrupt table.
		 */
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
			if (err)
				return err;
		}
	}

	/* We adopt the LDG assignment ordering used by the N2 NIU
	 * 'interrupt' properties because that simplifies a lot of
	 * things.  This ordering is:
	 *
	 *	MAC
	 *	MIF	(if port zero)
	 *	SYSERR	(if port zero)
	 *	RX channels
	 *	TX channels
	 */

	ldg_rotor = 0;

	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
				 LDN_MAC(port));
	if (err)
		return err;

	ldg_rotor++;
	if (ldg_rotor == np->num_ldg)
		ldg_rotor = 0;

	if (port == 0) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_MIF);
		if (err)
			return err;

		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;

		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_DEVICE_ERROR);
		if (err)
			return err;

		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	first_chan = 0;
	for (i = 0; i < port; i++)
		first_chan += parent->rxchan_per_port[i];
	num_chan = parent->rxchan_per_port[port];

	for (i = first_chan; i < (first_chan + num_chan); i++) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_RXDMA(i));
		if (err)
			return err;
		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	first_chan = 0;
	for (i = 0; i < port; i++)
		first_chan += parent->txchan_per_port[i];
	num_chan = parent->txchan_per_port[port];
	for (i = first_chan; i < (first_chan + num_chan); i++) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_TXDMA(i));
		if (err)
			return err;
		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	return 0;
}
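/* Summary of the assignment loop above: ldg_rotor walks the available
 * LDGs in a circle, so with fewer vectors than logical devices several
 * LDNs simply share a group.  For a non-zero port with 2 LDGs, 2 RX and
 * 2 TX channels the resulting mapping is:
 *
 *	LDG[0]: MAC, RXDMA(ch1), TXDMA(ch1)
 *	LDG[1]: RXDMA(ch0), TXDMA(ch0)
 *
 * Port 0 additionally folds MIF and SYSERR into the rotation.
 */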
static void __devexit niu_ldg_free(struct niu *np)
{
	if (np->flags & NIU_FLAGS_MSIX)
		pci_disable_msix(np->pdev);
}
static int __devinit niu_get_of_props(struct niu *np)
{
#ifdef CONFIG_SPARC64
	struct net_device *dev = np->dev;
	struct device_node *dp;
	const char *phy_type;
	const u8 *mac_addr;
	const char *model;
	int prop_len;

	if (np->parent->plat_type == PLAT_TYPE_NIU)
		dp = np->op->node;
	else
		dp = pci_device_to_OF_node(np->pdev);

	phy_type = of_get_property(dp, "phy-type", &prop_len);
	if (!phy_type) {
		netdev_err(dev, "%s: OF node lacks phy-type property\n",
			   dp->full_name);
		return -EINVAL;
	}

	if (!strcmp(phy_type, "none"))
		return -ENODEV;

	strcpy(np->vpd.phy_type, phy_type);

	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		netdev_err(dev, "%s: Illegal phy string [%s]\n",
			   dp->full_name, np->vpd.phy_type);
		return -EINVAL;
	}

	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
	if (!mac_addr) {
		netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
			   dp->full_name);
		return -EINVAL;
	}
	if (prop_len != dev->addr_len) {
		netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
			   dp->full_name, prop_len);
	}
	memcpy(dev->perm_addr, mac_addr, dev->addr_len);
	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
		netdev_err(dev, "%s: OF MAC address is invalid\n",
			   dp->full_name);
		netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
		return -EINVAL;
	}

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);

	model = of_get_property(dp, "model", &prop_len);
	if (model)
		strcpy(np->vpd.model, model);

	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	}

	return 0;
#else
	return -EINVAL;
#endif
}
static int __devinit niu_get_invariants(struct niu *np)
{
	int err, have_props;
	u32 offset;

	err = niu_get_of_props(np);
	if (err == -ENODEV)
		return err;

	have_props = !err;

	err = niu_init_mac_ipp_pcs_base(np);
	if (err)
		return err;

	if (have_props) {
		err = niu_get_and_validate_port(np);
		if (err)
			return err;
	} else {
		if (np->parent->plat_type == PLAT_TYPE_NIU)
			return -EINVAL;

		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
		offset = niu_pci_vpd_offset(np);
		netif_printk(np, probe, KERN_DEBUG, np->dev,
			     "%s() VPD offset [%08x]\n", __func__, offset);
		if (offset)
			niu_pci_vpd_fetch(np, offset);
		nw64(ESPC_PIO_EN, 0);

		if (np->flags & NIU_FLAGS_VPD_VALID) {
			niu_pci_vpd_validate(np);
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
		}

		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
			err = niu_pci_probe_sprom(np);
			if (err)
				return err;
		}
	}

	err = niu_probe_ports(np);
	if (err)
		return err;

	niu_ldg_init(np);

	niu_classifier_swstate_init(np);
	niu_link_config_init(np);

	err = niu_determine_phy_disposition(np);
	if (!err)
		err = niu_init_link(np);

	return err;
}
static LIST_HEAD(niu_parent_list);
static DEFINE_MUTEX(niu_parent_lock);
static int niu_parent_index;
static ssize_t show_port_phy(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	u32 port_phy = p->port_phy;
	char *orig_buf = buf;
	int i;

	if (port_phy == PORT_PHY_UNKNOWN ||
	    port_phy == PORT_PHY_INVALID)
		return 0;

	for (i = 0; i < p->num_ports; i++) {
		const char *type_str;
		int type;

		type = phy_decode(port_phy, i);
		if (type == PORT_TYPE_10G)
			type_str = "10G";
		else
			type_str = "1G";
		buf += sprintf(buf,
			       (i == 0) ? "%s" : " %s",
			       type_str);
	}
	buf += sprintf(buf, "\n");
	return buf - orig_buf;
}
static ssize_t show_plat_type(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	const char *type_str;

	switch (p->plat_type) {
	case PLAT_TYPE_ATLAS:
		type_str = "atlas";
		break;
	case PLAT_TYPE_NIU:
		type_str = "niu";
		break;
	case PLAT_TYPE_VF_P0:
		type_str = "vf_p0";
		break;
	case PLAT_TYPE_VF_P1:
		type_str = "vf_p1";
		break;
	default:
		type_str = "unknown";
		break;
	}

	return sprintf(buf, "%s\n", type_str);
}
static ssize_t __show_chan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf,
				    int rx)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	char *orig_buf = buf;
	u8 *arr;
	int i;

	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);

	for (i = 0; i < p->num_ports; i++) {
		buf += sprintf(buf,
			       (i == 0) ? "%d" : " %d",
			       arr[i]);
	}
	buf += sprintf(buf, "\n");

	return buf - orig_buf;
}

static ssize_t show_rxchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 1);
}

static ssize_t show_txchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	/* Pass rx == 0 so the TX attribute reports the TX channel counts
	 * rather than duplicating the RX ones.
	 */
	return __show_chan_per_port(dev, attr, buf, 0);
}

static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;

	return sprintf(buf, "%d\n", p->num_ports);
}
[] = {
9476 __ATTR(port_phy
, S_IRUGO
, show_port_phy
, NULL
),
9477 __ATTR(plat_type
, S_IRUGO
, show_plat_type
, NULL
),
9478 __ATTR(rxchan_per_port
, S_IRUGO
, show_rxchan_per_port
, NULL
),
9479 __ATTR(txchan_per_port
, S_IRUGO
, show_txchan_per_port
, NULL
),
9480 __ATTR(num_ports
, S_IRUGO
, show_num_ports
, NULL
),
9484 static struct niu_parent
* __devinit
niu_new_parent(struct niu
*np
,
9485 union niu_parent_id
*id
,
9488 struct platform_device
*plat_dev
;
9489 struct niu_parent
*p
;
9492 plat_dev
= platform_device_register_simple("niu", niu_parent_index
,
9494 if (IS_ERR(plat_dev
))
9497 for (i
= 0; attr_name(niu_parent_attributes
[i
]); i
++) {
9498 int err
= device_create_file(&plat_dev
->dev
,
9499 &niu_parent_attributes
[i
]);
9501 goto fail_unregister
;
9504 p
= kzalloc(sizeof(*p
), GFP_KERNEL
);
9506 goto fail_unregister
;
9508 p
->index
= niu_parent_index
++;
9510 plat_dev
->dev
.platform_data
= p
;
9511 p
->plat_dev
= plat_dev
;
9513 memcpy(&p
->id
, id
, sizeof(*id
));
9514 p
->plat_type
= ptype
;
9515 INIT_LIST_HEAD(&p
->list
);
9516 atomic_set(&p
->refcnt
, 0);
9517 list_add(&p
->list
, &niu_parent_list
);
9518 spin_lock_init(&p
->lock
);
9520 p
->rxdma_clock_divider
= 7500;
9522 p
->tcam_num_entries
= NIU_PCI_TCAM_ENTRIES
;
9523 if (p
->plat_type
== PLAT_TYPE_NIU
)
9524 p
->tcam_num_entries
= NIU_NONPCI_TCAM_ENTRIES
;
9526 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_SCTP_IPV6
; i
++) {
9527 int index
= i
- CLASS_CODE_USER_PROG1
;
9529 p
->tcam_key
[index
] = TCAM_KEY_TSEL
;
9530 p
->flow_key
[index
] = (FLOW_KEY_IPSA
|
9533 (FLOW_KEY_L4_BYTE12
<<
9534 FLOW_KEY_L4_0_SHIFT
) |
9535 (FLOW_KEY_L4_BYTE12
<<
9536 FLOW_KEY_L4_1_SHIFT
));
9539 for (i
= 0; i
< LDN_MAX
+ 1; i
++)
9540 p
->ldg_map
[i
] = LDG_INVALID
;
9545 platform_device_unregister(plat_dev
);
static struct niu_parent * __devinit niu_get_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct niu_parent *p, *tmp;
	int port = np->port;

	mutex_lock(&niu_parent_lock);
	p = NULL;
	list_for_each_entry(tmp, &niu_parent_list, list) {
		if (!memcmp(id, &tmp->id, sizeof(*id))) {
			p = tmp;
			break;
		}
	}
	if (!p)
		p = niu_new_parent(np, id, ptype);

	if (p) {
		char port_name[6];
		int err;

		sprintf(port_name, "port%d", port);
		err = sysfs_create_link(&p->plat_dev->dev.kobj,
					&np->device->kobj,
					port_name);
		if (!err) {
			p->ports[port] = np;
			atomic_inc(&p->refcnt);
		}
	}
	mutex_unlock(&niu_parent_lock);

	return p;
}
static void niu_put_parent(struct niu *np)
{
	struct niu_parent *p = np->parent;
	u8 port = np->port;
	char port_name[6];

	BUG_ON(!p || p->ports[port] != np);

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "%s() port[%u]\n", __func__, port);

	sprintf(port_name, "port%d", port);

	mutex_lock(&niu_parent_lock);

	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);

	p->ports[port] = NULL;
	np->parent = NULL;

	if (atomic_dec_and_test(&p->refcnt)) {
		list_del(&p->list);
		platform_device_unregister(p->plat_dev);
	}

	mutex_unlock(&niu_parent_lock);
}
static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
				    u64 *handle, gfp_t flag)
{
	dma_addr_t dh;
	void *ret;

	ret = dma_alloc_coherent(dev, size, &dh, flag);
	if (ret)
		*handle = dh;
	return ret;
}

static void niu_pci_free_coherent(struct device *dev, size_t size,
				  void *cpu_addr, u64 handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}

static u64 niu_pci_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	return dma_map_page(dev, page, offset, size, direction);
}

static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
			       size_t size, enum dma_data_direction direction)
{
	dma_unmap_page(dev, dma_address, size, direction);
}

static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
			      size_t size,
			      enum dma_data_direction direction)
{
	return dma_map_single(dev, cpu_addr, size, direction);
}

static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
				 size_t size,
				 enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

static const struct niu_ops niu_pci_ops = {
	.alloc_coherent	= niu_pci_alloc_coherent,
	.free_coherent	= niu_pci_free_coherent,
	.map_page	= niu_pci_map_page,
	.unmap_page	= niu_pci_unmap_page,
	.map_single	= niu_pci_map_single,
	.unmap_single	= niu_pci_unmap_single,
};
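/* Design note: niu_ops lets the rest of the driver allocate and map DMA
 * memory without caring whether the device sits behind PCI (this table,
 * which goes through the generic DMA API) or behind the Niagara-2
 * hypervisor (niu_phys_ops further below, which hands out physical
 * addresses directly).  Queue setup paths call np->ops instead of the
 * dma_map_*() helpers so the same code serves both bus types.
 */
#if 0	/* Illustrative sketch only; niu_map_rxpage_example() is a
	 * hypothetical helper, not part of the driver.
	 */
static u64 niu_map_rxpage_example(struct niu *np, struct page *page)
{
	/* Map one RX buffer page through whichever ops table was
	 * installed at probe time.
	 */
	return np->ops->map_page(np->device, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE);
}
#endif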
static void __devinit niu_driver_version(void)
{
	static int niu_version_printed;

	if (niu_version_printed++ == 0)
		pr_info("%s", version);
}

static struct net_device * __devinit niu_alloc_and_init(
	struct device *gen_dev, struct pci_dev *pdev,
	struct of_device *op, const struct niu_ops *ops,
	u8 port)
{
	struct net_device *dev;
	struct niu *np;

	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
	if (!dev) {
		dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, gen_dev);

	np = netdev_priv(dev);
	np->dev = dev;
	np->pdev = pdev;
	np->op = op;
	np->device = gen_dev;
	np->ops = ops;

	np->msg_enable = niu_debug;

	spin_lock_init(&np->lock);
	INIT_WORK(&np->reset_task, niu_reset_task);

	np->port = port;

	return dev;
}
static const struct net_device_ops niu_netdev_ops = {
	.ndo_open		= niu_open,
	.ndo_stop		= niu_close,
	.ndo_start_xmit		= niu_start_xmit,
	.ndo_get_stats		= niu_get_stats,
	.ndo_set_multicast_list	= niu_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= niu_set_mac_addr,
	.ndo_do_ioctl		= niu_ioctl,
	.ndo_tx_timeout		= niu_tx_timeout,
	.ndo_change_mtu		= niu_change_mtu,
};

static void __devinit niu_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &niu_netdev_ops;
	dev->ethtool_ops = &niu_ethtool_ops;
	dev->watchdog_timeo = NIU_TX_TIMEOUT;
}
static void __devinit niu_device_announce(struct niu *np)
{
	struct net_device *dev = np->dev;

	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);

	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	} else {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
			 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
			  "COPPER")),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	}
}
static int __devinit niu_pci_init_one(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	int err, pos;
	u64 dma_mask;
	u16 val16;

	niu_driver_version();

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
		goto err_out_free_res;
	}

	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.pci.domain = pci_domain_nr(pdev->bus);
	parent_id.pci.bus = pdev->bus->number;
	parent_id.pci.device = PCI_SLOT(pdev->devfn);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_ATLAS);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	val16 |= (PCI_EXP_DEVCTL_CERE |
		  PCI_EXP_DEVCTL_NFERE |
		  PCI_EXP_DEVCTL_FERE |
		  PCI_EXP_DEVCTL_URRE |
		  PCI_EXP_DEVCTL_RELAX_EN);
	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);

	dma_mask = DMA_BIT_MASK(44);
	err = pci_set_dma_mask(pdev, dma_mask);
	if (!err) {
		dev->features |= NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, dma_mask);
		if (err) {
			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
			goto err_out_release_parent;
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_parent;
		}
	}

	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);

	np->regs = pci_ioremap_bar(pdev, 0);
	if (!np->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	dev->irq = pdev->irq;

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->regs) {
		iounmap(np->regs);
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}
static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->regs) {
			iounmap(np->regs);
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);

		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	niu_netif_stop(np);

	del_timer_sync(&np->timer);

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_interrupts(np, 0);
	spin_unlock_irqrestore(&np->lock, flags);

	netif_device_detach(dev);

	spin_lock_irqsave(&np->lock, flags);
	niu_stop_hw(np);
	spin_unlock_irqrestore(&np->lock, flags);

	pci_save_state(pdev);

	return 0;
}

static int niu_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);

	netif_device_attach(dev);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}
static struct pci_driver niu_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= niu_pci_tbl,
	.probe		= niu_pci_init_one,
	.remove		= __devexit_p(niu_pci_remove_one),
	.suspend	= niu_suspend,
	.resume		= niu_resume,
};
#ifdef CONFIG_SPARC64
static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
				     u64 *dma_addr, gfp_t flag)
{
	unsigned long order = get_order(size);
	unsigned long page = __get_free_pages(flag, order);

	if (page == 0UL)
		return NULL;
	memset((char *)page, 0, PAGE_SIZE << order);
	*dma_addr = __pa(page);

	return (void *) page;
}

static void niu_phys_free_coherent(struct device *dev, size_t size,
				   void *cpu_addr, u64 handle)
{
	unsigned long order = get_order(size);

	free_pages((unsigned long) cpu_addr, order);
}

static u64 niu_phys_map_page(struct device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	return page_to_phys(page) + offset;
}

static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
				size_t size, enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
			       size_t size,
			       enum dma_data_direction direction)
{
	return __pa(cpu_addr);
}

static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static const struct niu_ops niu_phys_ops = {
	.alloc_coherent	= niu_phys_alloc_coherent,
	.free_coherent	= niu_phys_free_coherent,
	.map_page	= niu_phys_map_page,
	.unmap_page	= niu_phys_unmap_page,
	.map_single	= niu_phys_map_single,
	.unmap_single	= niu_phys_unmap_single,
};
static int __devinit niu_of_probe(struct of_device *op,
				  const struct of_device_id *match)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	const u32 *reg;
	int err;

	niu_driver_version();

	reg = of_get_property(op->node, "reg", NULL);
	if (!reg) {
		dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
			op->node->full_name);
		return -ENODEV;
	}

	dev = niu_alloc_and_init(&op->dev, NULL, op,
				 &niu_phys_ops, reg[0] & 0x1);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.of = of_get_parent(op->node);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_NIU);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);

	np->regs = of_ioremap(&op->resource[1], 0,
			      resource_size(&op->resource[1]),
			      "niu regs");
	if (!np->regs) {
		dev_err(&op->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
				    resource_size(&op->resource[2]),
				    "niu vregs-1");
	if (!np->vir_regs_1) {
		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
				    resource_size(&op->resource[3]),
				    "niu vregs-2");
	if (!np->vir_regs_2) {
		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&op->dev, "Cannot register net device, aborting\n");
		goto err_out_iounmap;
	}

	dev_set_drvdata(&op->dev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->vir_regs_1) {
		of_iounmap(&op->resource[2], np->vir_regs_1,
			   resource_size(&op->resource[2]));
		np->vir_regs_1 = NULL;
	}

	if (np->vir_regs_2) {
		of_iounmap(&op->resource[3], np->vir_regs_2,
			   resource_size(&op->resource[3]));
		np->vir_regs_2 = NULL;
	}

	if (np->regs) {
		of_iounmap(&op->resource[1], np->regs,
			   resource_size(&op->resource[1]));
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}
static int __devexit niu_of_remove(struct of_device *op)
{
	struct net_device *dev = dev_get_drvdata(&op->dev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->vir_regs_1) {
			of_iounmap(&op->resource[2], np->vir_regs_1,
				   resource_size(&op->resource[2]));
			np->vir_regs_1 = NULL;
		}

		if (np->vir_regs_2) {
			of_iounmap(&op->resource[3], np->vir_regs_2,
				   resource_size(&op->resource[3]));
			np->vir_regs_2 = NULL;
		}

		if (np->regs) {
			of_iounmap(&op->resource[1], np->regs,
				   resource_size(&op->resource[1]));
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);
		dev_set_drvdata(&op->dev, NULL);
	}
	return 0;
}
static const struct of_device_id niu_match[] = {
	{
		.name = "network",
		.compatible = "SUNW,niusl",
	},
	{},
};
MODULE_DEVICE_TABLE(of, niu_match);

static struct of_platform_driver niu_of_driver = {
	.name		= "niu",
	.match_table	= niu_match,
	.probe		= niu_of_probe,
	.remove		= __devexit_p(niu_of_remove),
};

#endif /* CONFIG_SPARC64 */
static int __init niu_init(void)
{
	int err = 0;

	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);

	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);

#ifdef CONFIG_SPARC64
	err = of_register_driver(&niu_of_driver, &of_bus_type);
#endif

	if (!err) {
		err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
		if (err)
			of_unregister_driver(&niu_of_driver);
#endif
	}

	return err;
}

static void __exit niu_exit(void)
{
	pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
	of_unregister_driver(&niu_of_driver);
#endif
}

module_init(niu_init);
module_exit(niu_exit);