/*
 * New driver for Marvell Yukon chipset and SysKonnect Gigabit
 * Ethernet adapters. Based on earlier sk98lin, e100 and
 * FreeBSD if_sk drivers.
 *
 * This driver intentionally does not support all the features
 * of the original driver such as link fail-over and link management because
 * those should be done at higher levels.
 *
 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>

#include "skge.h"

#define DRV_NAME		"skge"
#define DRV_VERSION		"1.5"
#define PFX			DRV_NAME " "

#define DEFAULT_TX_RING_SIZE	128
#define DEFAULT_RX_RING_SIZE	512
#define MAX_TX_RING_SIZE	1024
#define MAX_RX_RING_SIZE	4096
#define RX_COPY_THRESHOLD	128
#define RX_BUF_SIZE		1536
#define PHY_RETRIES		1000
#define ETH_JUMBO_MTU		9000
#define TX_WATCHDOG		(5 * HZ)
#define NAPI_WEIGHT		64
#define BLINK_MS		250	/* LED blink period used by skge_phys_id() */

MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
	= NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
	  | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static const struct pci_device_id skge_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
	{ PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, skge_id_table);

static int skge_up(struct net_device *dev);
static int skge_down(struct net_device *dev);
static void skge_phy_reset(struct skge_port *skge);
static void skge_tx_clean(struct skge_port *skge);
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void genesis_get_stats(struct skge_port *skge, u64 *data);
static void yukon_get_stats(struct skge_port *skge, u64 *data);
static void yukon_init(struct skge_hw *hw, int port);
static void genesis_mac_init(struct skge_hw *hw, int port);
static void genesis_link_up(struct skge_port *skge);

/* Avoid conditionals by using array */
static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };

static int skge_get_regs_len(struct net_device *dev)
{
	return 0x4000;
}

/*
 * Returns copy of whole control register region
 * Note: skip RAM address register because accessing it will
 *	 cause bus hangs!
 */
static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	const struct skge_port *skge = netdev_priv(dev);
	const void __iomem *io = skge->hw->regs;

	memset(p, 0, regs->len);
	memcpy_fromio(p, io, B3_RAM_ADDR);

	memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
		      regs->len - B3_RI_WTO_R1);
}

/* Wake on LAN only supported on Yukon chips with rev 1 or above */
static int wol_supported(const struct skge_hw *hw)
{
	return !((hw->chip_id == CHIP_ID_GENESIS ||
		  (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)));
}

static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct skge_port *skge = netdev_priv(dev);

	wol->supported = wol_supported(skge->hw) ? WAKE_MAGIC : 0;
	wol->wolopts = skge->wol ? WAKE_MAGIC : 0;
}

static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
		return -EOPNOTSUPP;

	if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw))
		return -EOPNOTSUPP;

	skge->wol = wol->wolopts == WAKE_MAGIC;

	if (skge->wol) {
		memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);

		skge_write16(hw, WOL_CTRL_STAT,
			     WOL_CTL_ENA_PME_ON_MAGIC_PKT |
			     WOL_CTL_ENA_MAGIC_PKT_UNIT);
	} else
		skge_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);

	return 0;
}

/* Determine supported/advertised modes based on hardware.
 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
 */
static u32 skge_supported_modes(const struct skge_hw *hw)
{
	u32 supported;

	if (hw->copper) {
		supported = SUPPORTED_10baseT_Half
			| SUPPORTED_10baseT_Full
			| SUPPORTED_100baseT_Half
			| SUPPORTED_100baseT_Full
			| SUPPORTED_1000baseT_Half
			| SUPPORTED_1000baseT_Full
			| SUPPORTED_Autoneg | SUPPORTED_TP;

		if (hw->chip_id == CHIP_ID_GENESIS)
			supported &= ~(SUPPORTED_10baseT_Half
				       | SUPPORTED_10baseT_Full
				       | SUPPORTED_100baseT_Half
				       | SUPPORTED_100baseT_Full);

		else if (hw->chip_id == CHIP_ID_YUKON)
			supported &= ~SUPPORTED_1000baseT_Half;
	} else
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
			| SUPPORTED_Autoneg;

	return supported;
}

static int skge_get_settings(struct net_device *dev,
			     struct ethtool_cmd *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = skge_supported_modes(hw);

	if (hw->copper) {
		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy_addr;
	} else
		ecmd->port = PORT_FIBRE;

	ecmd->advertising = skge->advertising;
	ecmd->autoneg = skge->autoneg;
	ecmd->speed = skge->speed;
	ecmd->duplex = skge->duplex;
	return 0;
}

static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	const struct skge_hw *hw = skge->hw;
	u32 supported = skge_supported_modes(hw);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		ecmd->advertising = supported;
		skge->duplex = -1;
		skge->speed = -1;
	} else {
		u32 setting;

		switch (ecmd->speed) {
		case SPEED_1000:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_1000baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_1000baseT_Half;
			else
				return -EINVAL;
			break;
		case SPEED_100:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_100baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_100baseT_Half;
			else
				return -EINVAL;
			break;
		case SPEED_10:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_10baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_10baseT_Half;
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if ((setting & supported) == 0)
			return -EINVAL;

		skge->speed = ecmd->speed;
		skge->duplex = ecmd->duplex;
	}

	skge->autoneg = ecmd->autoneg;
	skge->advertising = ecmd->advertising;

	if (netif_running(dev))
		skge_phy_reset(skge);

	return 0;
}

static void skge_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct skge_port *skge = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(skge->hw->pdev));
}

static const struct skge_stat {
	char	name[ETH_GSTRING_LEN];
	u16	xmac_offset;
	u16	gma_offset;
} skge_stats[] = {
	{ "tx_bytes",		XM_TXO_OK_HI,	GM_TXO_OK_HI },
	{ "rx_bytes",		XM_RXO_OK_HI,	GM_RXO_OK_HI },

	{ "tx_broadcast",	XM_TXF_BC_OK,	GM_TXF_BC_OK },
	{ "rx_broadcast",	XM_RXF_BC_OK,	GM_RXF_BC_OK },
	{ "tx_multicast",	XM_TXF_MC_OK,	GM_TXF_MC_OK },
	{ "rx_multicast",	XM_RXF_MC_OK,	GM_RXF_MC_OK },
	{ "tx_unicast",		XM_TXF_UC_OK,	GM_TXF_UC_OK },
	{ "rx_unicast",		XM_RXF_UC_OK,	GM_RXF_UC_OK },
	{ "tx_mac_pause",	XM_TXF_MPAUSE,	GM_TXF_MPAUSE },
	{ "rx_mac_pause",	XM_RXF_MPAUSE,	GM_RXF_MPAUSE },

	{ "collisions",		XM_TXF_SNG_COL,	GM_TXF_SNG_COL },
	{ "multi_collisions",	XM_TXF_MUL_COL,	GM_TXF_MUL_COL },
	{ "aborted",		XM_TXF_ABO_COL,	GM_TXF_ABO_COL },
	{ "late_collision",	XM_TXF_LAT_COL,	GM_TXF_LAT_COL },
	{ "fifo_underrun",	XM_TXE_FIFO_UR,	GM_TXE_FIFO_UR },
	{ "fifo_overflow",	XM_RXE_FIFO_OV,	GM_RXE_FIFO_OV },

	{ "rx_toolong",		XM_RXF_LNG_ERR,	GM_RXF_LNG_ERR },
	{ "rx_jabber",		XM_RXF_JAB_PKT,	GM_RXF_JAB_PKT },
	{ "rx_runt",		XM_RXE_RUNT,	GM_RXE_FRAG },
	{ "rx_too_long",	XM_RXF_LNG_ERR,	GM_RXF_LNG_ERR },
	{ "rx_fcs_error",	XM_RXF_FCS_ERR,	GM_RXF_FCS_ERR },
};

static int skge_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(skge_stats);
}

static void skge_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct skge_port *skge = netdev_priv(dev);

	if (skge->hw->chip_id == CHIP_ID_GENESIS)
		genesis_get_stats(skge, data);
	else
		yukon_get_stats(skge, data);
}

/* Use hardware MIB variables for critical path statistics and
 * transmit feedback not reported at interrupt.
 * Other errors are accounted for in interrupt handler.
 */
static struct net_device_stats *skge_get_stats(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	u64 data[ARRAY_SIZE(skge_stats)];

	if (skge->hw->chip_id == CHIP_ID_GENESIS)
		genesis_get_stats(skge, data);
	else
		yukon_get_stats(skge, data);

	skge->net_stats.tx_bytes = data[0];
	skge->net_stats.rx_bytes = data[1];
	skge->net_stats.tx_packets = data[2] + data[4] + data[6];
	skge->net_stats.rx_packets = data[3] + data[5] + data[7];
	skge->net_stats.multicast = data[3] + data[5];
	skge->net_stats.collisions = data[10];
	skge->net_stats.tx_aborted_errors = data[12];

	return &skge->net_stats;
}

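/*
 * Index map used above (added note): data[] follows the order of the
 * skge_stats[] table, so data[0]/data[1] are the tx/rx octet counters,
 * data[2]/data[4]/data[6] are tx broadcast/multicast/unicast frames,
 * data[3]/data[5]/data[7] are the rx equivalents, data[10] is the
 * single-collision counter and data[12] is aborted (excessive-collision)
 * frames.
 */
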
static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       skge_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static void skge_get_ring_param(struct net_device *dev,
				struct ethtool_ringparam *p)
{
	struct skge_port *skge = netdev_priv(dev);

	p->rx_max_pending = MAX_RX_RING_SIZE;
	p->tx_max_pending = MAX_TX_RING_SIZE;
	p->rx_mini_max_pending = 0;
	p->rx_jumbo_max_pending = 0;

	p->rx_pending = skge->rx_ring.count;
	p->tx_pending = skge->tx_ring.count;
	p->rx_mini_pending = 0;
	p->rx_jumbo_pending = 0;
}

static int skge_set_ring_param(struct net_device *dev,
			       struct ethtool_ringparam *p)
{
	struct skge_port *skge = netdev_priv(dev);

	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
	    p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
		return -EINVAL;

	skge->rx_ring.count = p->rx_pending;
	skge->tx_ring.count = p->tx_pending;

	if (netif_running(dev)) {
		/* restart the interface so the new ring sizes take effect */
		skge_down(dev);
		skge_up(dev);
	}

	return 0;
}

static u32 skge_get_msglevel(struct net_device *netdev)
{
	struct skge_port *skge = netdev_priv(netdev);
	return skge->msg_enable;
}

static void skge_set_msglevel(struct net_device *netdev, u32 value)
{
	struct skge_port *skge = netdev_priv(netdev);
	skge->msg_enable = value;
}

static int skge_nway_reset(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
		return -EINVAL;

	skge_phy_reset(skge);
	return 0;
}

static int skge_set_sg(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;
	return ethtool_op_set_sg(dev, data);
}

static int skge_set_tx_csum(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;

	return ethtool_op_set_tx_csum(dev, data);
}

static u32 skge_get_rx_csum(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	return skge->rx_csum;
}

/* Only Yukon supports checksum offload. */
static int skge_set_rx_csum(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);

	if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;

	skge->rx_csum = data;
	return 0;
}

static void skge_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);

	ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND)
		|| (skge->flow_control == FLOW_MODE_SYMMETRIC);
	ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND)
		|| (skge->flow_control == FLOW_MODE_SYMMETRIC);

	ecmd->autoneg = skge->autoneg;
}

static int skge_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);

	skge->autoneg = ecmd->autoneg;
	if (ecmd->rx_pause && ecmd->tx_pause)
		skge->flow_control = FLOW_MODE_SYMMETRIC;
	else if (ecmd->rx_pause && !ecmd->tx_pause)
		skge->flow_control = FLOW_MODE_REM_SEND;
	else if (!ecmd->rx_pause && ecmd->tx_pause)
		skge->flow_control = FLOW_MODE_LOC_SEND;
	else
		skge->flow_control = FLOW_MODE_NONE;

	if (netif_running(dev))
		skge_phy_reset(skge);
	return 0;
}

/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return 53215; /* or:  53.125 MHz */
	else
		return 78215; /* or:  78.125 MHz */
}

/* Chip HZ to microseconds */
static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
{
	return (ticks * 1000) / hwkhz(hw);
}

/* Microseconds to chip HZ */
static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
{
	return hwkhz(hw) * usec / 1000;
}

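/*
 * Worked example (added for illustration; not in the original source):
 * on Yukon, hwkhz() reports 78215 kHz, so
 *	skge_usecs2clk(hw, 100) = 78215 * 100 / 1000 = 7821 clock ticks, and
 *	skge_clk2usec(hw, 7821) = 7821 * 1000 / 78215 = 99 us,
 * i.e. the round trip can lose a microsecond to integer division.
 */
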
static int skge_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	ecmd->rx_coalesce_usecs = 0;
	ecmd->tx_coalesce_usecs = 0;

	if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
		u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
		u32 msk = skge_read32(hw, B2_IRQM_MSK);

		if (msk & rxirqmask[port])
			ecmd->rx_coalesce_usecs = delay;
		if (msk & txirqmask[port])
			ecmd->tx_coalesce_usecs = delay;
	}

	return 0;
}

/* Note: interrupt timer is per board, but can turn on/off per port */
static int skge_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 msk = skge_read32(hw, B2_IRQM_MSK);
	u32 delay = 25;

	if (ecmd->rx_coalesce_usecs == 0)
		msk &= ~rxirqmask[port];
	else if (ecmd->rx_coalesce_usecs < 25 ||
		 ecmd->rx_coalesce_usecs > 33333)
		return -EINVAL;
	else {
		msk |= rxirqmask[port];
		delay = ecmd->rx_coalesce_usecs;
	}

	if (ecmd->tx_coalesce_usecs == 0)
		msk &= ~txirqmask[port];
	else if (ecmd->tx_coalesce_usecs < 25 ||
		 ecmd->tx_coalesce_usecs > 33333)
		return -EINVAL;
	else {
		msk |= txirqmask[port];
		delay = min(delay, ecmd->rx_coalesce_usecs);
	}

	skge_write32(hw, B2_IRQM_MSK, msk);
	if (msk == 0)
		skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
	else {
		skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
		skge_write32(hw, B2_IRQM_CTRL, TIM_START);
	}
	return 0;
}

enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
static void skge_led(struct skge_port *skge, enum led_mode mode)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	spin_lock_bh(&hw->phy_lock);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		switch (mode) {
		case LED_MODE_OFF:
			xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
			skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
			break;

		case LED_MODE_ON:
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);

			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
			skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
			break;

		case LED_MODE_TST:
			skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
			skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);

			xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
			break;
		}
	} else {
		switch (mode) {
		case LED_MODE_OFF:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(MO_LED_OFF)  |
				     PHY_M_LED_MO_10(MO_LED_OFF)   |
				     PHY_M_LED_MO_100(MO_LED_OFF)  |
				     PHY_M_LED_MO_1000(MO_LED_OFF) |
				     PHY_M_LED_MO_RX(MO_LED_OFF));
			break;
		case LED_MODE_ON:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
				     PHY_M_LED_PULS_DUR(PULS_170MS) |
				     PHY_M_LED_BLINK_RT(BLINK_84MS) |
				     PHY_M_LEDC_TX_CTRL |
				     PHY_M_LEDC_DP_CTRL);

			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_RX(MO_LED_OFF) |
				     (skge->speed == SPEED_100 ?
				      PHY_M_LED_MO_100(MO_LED_ON) : 0));
			break;
		case LED_MODE_TST:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(MO_LED_ON)  |
				     PHY_M_LED_MO_10(MO_LED_ON)   |
				     PHY_M_LED_MO_100(MO_LED_ON)  |
				     PHY_M_LED_MO_1000(MO_LED_ON) |
				     PHY_M_LED_MO_RX(MO_LED_ON));
			break;
		}
	}
	spin_unlock_bh(&hw->phy_lock);
}

/* blink LED's for finding board */
static int skge_phys_id(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);
	unsigned long ms;
	enum led_mode mode = LED_MODE_TST;

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT / HZ) * 1000;
	else
		ms = data * 1000;

	while (ms > 0) {
		skge_led(skge, mode);
		mode ^= LED_MODE_TST;

		if (msleep_interruptible(BLINK_MS))
			break;
		ms -= BLINK_MS;
	}

	/* back to regular LED state */
	skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);

	return 0;
}

static struct ethtool_ops skge_ethtool_ops = {
	.get_settings	= skge_get_settings,
	.set_settings	= skge_set_settings,
	.get_drvinfo	= skge_get_drvinfo,
	.get_regs_len	= skge_get_regs_len,
	.get_regs	= skge_get_regs,
	.get_wol	= skge_get_wol,
	.set_wol	= skge_set_wol,
	.get_msglevel	= skge_get_msglevel,
	.set_msglevel	= skge_set_msglevel,
	.nway_reset	= skge_nway_reset,
	.get_link	= ethtool_op_get_link,
	.get_ringparam	= skge_get_ring_param,
	.set_ringparam	= skge_set_ring_param,
	.get_pauseparam = skge_get_pauseparam,
	.set_pauseparam = skge_set_pauseparam,
	.get_coalesce	= skge_get_coalesce,
	.set_coalesce	= skge_set_coalesce,
	.get_sg		= ethtool_op_get_sg,
	.set_sg		= skge_set_sg,
	.get_tx_csum	= ethtool_op_get_tx_csum,
	.set_tx_csum	= skge_set_tx_csum,
	.get_rx_csum	= skge_get_rx_csum,
	.set_rx_csum	= skge_set_rx_csum,
	.get_strings	= skge_get_strings,
	.phys_id	= skge_phys_id,
	.get_stats_count = skge_get_stats_count,
	.get_ethtool_stats = skge_get_ethtool_stats,
	.get_perm_addr	= ethtool_op_get_perm_addr,
};

/*
 * Allocate ring elements and chain them together
 * One-to-one association of board descriptors with ring elements
 */
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
{
	struct skge_tx_desc *d;
	struct skge_element *e;
	int i;

	ring->start = kcalloc(sizeof(*e), ring->count, GFP_KERNEL);
	if (!ring->start)
		return -ENOMEM;

	for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
		e->desc = d;
		if (i == ring->count - 1) {
			e->next = ring->start;
			d->next_offset = base;
		} else {
			e->next = e + 1;
			d->next_offset = base + (i+1) * sizeof(*d);
		}
	}
	ring->to_use = ring->to_clean = ring->start;

	return 0;
}

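/*
 * Layout note (added): the ring is circular in both views.  Software
 * elements are linked through e->next, with the last element pointing
 * back to ring->start; the matching hardware descriptors are linked
 * through d->next_offset, which holds the bus offset of the next
 * descriptor (or of the first one again for the last entry), so the
 * hardware walks the same circle the driver does.
 */
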
/* Allocate and setup a new buffer for receiving */
static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
			  struct sk_buff *skb, unsigned int bufsize)
{
	struct skge_rx_desc *rd = e->desc;
	u64 map;

	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
			     PCI_DMA_FROMDEVICE);

	rd->dma_lo = map;
	rd->dma_hi = map >> 32;
	e->skb = skb;
	rd->csum1_start = ETH_HLEN;
	rd->csum2_start = ETH_HLEN;

	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
	pci_unmap_addr_set(e, mapaddr, map);
	pci_unmap_len_set(e, maplen, bufsize);
}

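/*
 * Ownership note (added): setting BMU_OWN in rd->control is the handoff
 * point -- from then on the descriptor and the mapped buffer belong to
 * the hardware, which is why the address, checksum start offsets and
 * unmap bookkeeping are filled in before that final control write.
 */
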
/* Resume receiving using existing skb,
 * Note: DMA address is not changed by chip.
 *	 MTU not changed while receiver active.
 */
static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
{
	struct skge_rx_desc *rd = e->desc;

	rd->csum2 = 0;
	rd->csum2_start = ETH_HLEN;

	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
}

/* Free all buffers in receive ring, assumes receiver stopped */
static void skge_rx_clean(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;

	e = ring->start;
	do {
		struct skge_rx_desc *rd = e->desc;
		rd->control = 0;
		if (e->skb) {
			pci_unmap_single(hw->pdev,
					 pci_unmap_addr(e, mapaddr),
					 pci_unmap_len(e, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	} while ((e = e->next) != ring->start);
}

/* Allocate buffers for receive ring
 * For receive:  to_clean is next received frame.
 */
static int skge_rx_fill(struct skge_port *skge)
{
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;

	e = ring->start;
	do {
		struct sk_buff *skb;

		skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		skb_reserve(skb, NET_IP_ALIGN);
		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
	} while ( (e = e->next) != ring->start);

	ring->to_clean = ring->start;
	return 0;
}

static void skge_link_up(struct skge_port *skge)
{
	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);

	netif_carrier_on(skge->netdev);
	netif_wake_queue(skge->netdev);

	if (netif_msg_link(skge))
		printk(KERN_INFO PFX
		       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
		       skge->netdev->name, skge->speed,
		       skge->duplex == DUPLEX_FULL ? "full" : "half",
		       (skge->flow_control == FLOW_MODE_NONE) ? "none" :
		       (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" :
		       (skge->flow_control == FLOW_MODE_REM_SEND) ? "rx only" :
		       (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" :
		       "unknown");
}

static void skge_link_down(struct skge_port *skge)
{
	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
	netif_carrier_off(skge->netdev);
	netif_stop_queue(skge->netdev);

	if (netif_msg_link(skge))
		printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
}

static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
	int i;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	*val = xm_read16(hw, port, XM_PHY_DATA);

	for (i = 0; i < PHY_RETRIES; i++) {
		if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
			goto ready;
		udelay(1);
	}

	return -ETIMEDOUT;
 ready:
	*val = xm_read16(hw, port, XM_PHY_DATA);

	return 0;
}

static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
	u16 v = 0;

	if (__xm_phy_read(hw, port, reg, &v))
		printk(KERN_WARNING PFX "%s: phy read timed out\n",
		       hw->dev[port]->name);
	return v;
}

static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			goto ready;
		udelay(1);
	}
	return -EIO;

 ready:
	xm_write16(hw, port, XM_PHY_DATA, val);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void genesis_init(struct skge_hw *hw)
{
	/* set blink source counter */
	skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
	skge_write8(hw, B2_BSC_CTRL, BSC_START);

	/* configure mac arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure mac arbiter timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* configure packet arbiter timeout */
	skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
	skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
}

static void genesis_reset(struct skge_hw *hw, int port)
{
	const u8 zero[8]  = { 0 };

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);

	/* reset the statistics module */
	xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
	xm_write16(hw, port, XM_IMSK, 0xffff);	/* disable XMAC IRQs */
	xm_write32(hw, port, XM_MODE, 0);	/* clear Mode Reg */
	xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
	xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */

	/* disable Broadcom PHY IRQ */
	xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);

	xm_outhash(hw, port, XM_HSM, zero);
}

/* Convert mode to MII values  */
static const u16 phy_pause_map[] = {
	[FLOW_MODE_NONE] =	0,
	[FLOW_MODE_LOC_SEND] =	PHY_AN_PAUSE_ASYM,
	[FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
	[FLOW_MODE_REM_SEND]  = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
};

/* Check status of Broadcom phy link */
static void bcom_check_link(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	u16 status;

	/* read twice because of latch */
	(void) xm_phy_read(hw, port, PHY_BCOM_STAT);
	status = xm_phy_read(hw, port, PHY_BCOM_STAT);

	if ((status & PHY_ST_LSYNC) == 0) {
		u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
		cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
		xm_write16(hw, port, XM_MMU_CMD, cmd);
		/* dummy read to ensure writing */
		(void) xm_read16(hw, port, XM_MMU_CMD);

		if (netif_carrier_ok(dev))
			skge_link_down(skge);
	} else {
		if (skge->autoneg == AUTONEG_ENABLE &&
		    (status & PHY_ST_AN_OVER)) {
			u16 lpa = xm_phy_read(hw, port, PHY_BCOM_AUNE_LP);
			u16 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);

			if (lpa & PHY_B_AN_RF) {
				printk(KERN_NOTICE PFX "%s: remote fault\n",
				       dev->name);
				return;
			}

			/* Check Duplex mismatch */
			switch (aux & PHY_B_AS_AN_RES_MSK) {
			case PHY_B_RES_1000FD:
				skge->duplex = DUPLEX_FULL;
				break;
			case PHY_B_RES_1000HD:
				skge->duplex = DUPLEX_HALF;
				break;
			default:
				printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
				       dev->name);
				return;
			}

			/* We are using IEEE 802.3z/D5.0 Table 37-4 */
			switch (aux & PHY_B_AS_PAUSE_MSK) {
			case PHY_B_AS_PAUSE_MSK:
				skge->flow_control = FLOW_MODE_SYMMETRIC;
				break;
			case PHY_B_AS_PRR:
				skge->flow_control = FLOW_MODE_REM_SEND;
				break;
			case PHY_B_AS_PRT:
				skge->flow_control = FLOW_MODE_LOC_SEND;
				break;
			default:
				skge->flow_control = FLOW_MODE_NONE;
			}

			skge->speed = SPEED_1000;
		}

		if (!netif_carrier_ok(dev))
			genesis_link_up(skge);
	}
}

/* Broadcom 5400 only supports gigabit! SysKonnect did not put an additional
 * Phy on for 100 or 10Mbit operation
 */
static void bcom_phy_init(struct skge_port *skge, int jumbo)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;
	u16 id1, r, ext, ctl;

	/* magic workaround patterns for Broadcom */
	static const struct {
		u16 reg;
		u16 val;
	} A1hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
		{ 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
		{ 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
		{ 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	}, C0hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
		{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
	};

	/* read Id from external PHY (all have the same address) */
	id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);

	/* Optimize MDIO transfer by suppressing preamble. */
	r = xm_read16(hw, port, XM_MMU_CMD);
	r |= XM_MMU_NO_PRE;
	xm_write16(hw, port, XM_MMU_CMD,r);

	switch (id1) {
	case PHY_BCOM_ID1_C0:
		/*
		 * Workaround BCOM Errata for the C0 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(C0hack); i++)
			xm_phy_write(hw, port,
				     C0hack[i].reg, C0hack[i].val);

		break;
	case PHY_BCOM_ID1_A1:
		/*
		 * Workaround BCOM Errata for the A1 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(A1hack); i++)
			xm_phy_write(hw, port,
				     A1hack[i].reg, A1hack[i].val);

		break;
	}

	/*
	 * Workaround BCOM Errata (#10523) for all BCom PHYs.
	 * Disable Power Management after reset.
	 */
	r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
	r |= PHY_B_AC_DIS_PM;
	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);

	/* Dummy read */
	xm_read16(hw, port, XM_ISRC);

	ext = PHY_B_PEC_EN_LTR; /* enable tx led */
	ctl = PHY_CT_SP1000;	/* always 1000mbit */

	if (skge->autoneg == AUTONEG_ENABLE) {
		/*
		 * Workaround BCOM Errata #1 for the C5 type.
		 * 1000Base-T Link Acquisition Failure in Slave Mode
		 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
		 */
		u16 adv = PHY_B_1000C_RD;
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			adv |= PHY_B_1000C_AHD;
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			adv |= PHY_B_1000C_AFD;
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);

		ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		if (skge->duplex == DUPLEX_FULL)
			ctl |= PHY_CT_DUP_MD;
		/* Force to slave */
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
	}

	/* Set autonegotiation pause parameters */
	xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
		     phy_pause_map[skge->flow_control] | PHY_AN_CSMA);

	/* Handle Jumbo frames */
	if (jumbo) {
		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
			     PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);

		ext |= PHY_B_PEC_HIGH_LA;

	}

	xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
	xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);

	/* Use link status change interrupt */
	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);

	bcom_check_link(hw, port);
}

static void genesis_mac_init(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
	int i;
	u32 r;
	const u8 zero[6]  = { 0 };

	for (i = 0; i < 10; i++) {
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_SET_MAC_RST);
		if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
			goto reset_ok;
		udelay(1);
	}

	printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);

 reset_ok:
	/* Unreset the XMAC. */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	/* Take external Phy out of reset */
	r = skge_read32(hw, B2_GP_IO);
	if (port == 0)
		r |= GP_DIR_0|GP_IO_0;
	else
		r |= GP_DIR_2|GP_IO_2;

	skge_write32(hw, B2_GP_IO, r);

	/* Enable GMII interface */
	xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);

	bcom_phy_init(skge, jumbo);

	/* Set Station Address */
	xm_outaddr(hw, port, XM_SA, dev->dev_addr);

	/* We don't use match addresses so clear */
	for (i = 1; i < 16; i++)
		xm_outaddr(hw, port, XM_EXM(i), zero);

	/* Clear MIB counters */
	xm_write16(hw, port, XM_STAT_CMD,
			XM_SC_CLR_RXC | XM_SC_CLR_TXC);
	/* Clear two times according to Errata #3 */
	xm_write16(hw, port, XM_STAT_CMD,
			XM_SC_CLR_RXC | XM_SC_CLR_TXC);

	/* configure Rx High Water Mark (XM_RX_HI_WM) */
	xm_write16(hw, port, XM_RX_HI_WM, 1450);

	/* We don't need the FCS appended to the packet. */
	r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
	if (jumbo)
		r |= XM_RX_BIG_PK_OK;

	if (skge->duplex == DUPLEX_HALF) {
		/*
		 * If in manual half duplex mode the other side might be in
		 * full duplex mode, so ignore if a carrier extension is not seen
		 * on frames received
		 */
		r |= XM_RX_DIS_CEXT;
	}
	xm_write16(hw, port, XM_RX_CMD, r);

	/* We want short frames padded to 60 bytes. */
	xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	xm_write16(hw, port, XM_TX_THR, 512);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	xm_write32(hw, port, XM_MODE, XM_DEF_MODE);

	/*
	 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Rx OK Low CntOv'
	 *	  and 'Octets Rx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);

	/*
	 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Tx OK Low CntOv'
	 *	  and 'Octets Tx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);

	/* Configure MAC arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, 72);
	skge_write8(hw, B3_MA_TOINI_RX2, 72);
	skge_write8(hw, B3_MA_TOINI_TX1, 72);
	skge_write8(hw, B3_MA_TOINI_TX2, 72);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* Configure Rx MAC FIFO */
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);

	if (jumbo) {
		/* Enable frame flushing if jumbo frames used */
		skge_write16(hw, SK_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH);
	} else {
		/* enable timeout timers if normal frames */
		skge_write16(hw, B3_PA_CTRL,
			     (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
	}
}

static void genesis_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 reg;

	genesis_reset(hw, port);

	/* Clear Tx packet arbiter timeout IRQ */
	skge_write16(hw, B3_PA_CTRL,
		     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);

	/*
	 * If the transfer sticks at the MAC the STOP command will not
	 * terminate if we don't flush the XMAC's transmit FIFO !
	 */
	xm_write32(hw, port, XM_MODE,
		   xm_read32(hw, port, XM_MODE)|XM_MD_FTF);

	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);

	/* For external PHYs there must be special handling */
	reg = skge_read32(hw, B2_GP_IO);
	if (port == 0) {
		reg |= GP_DIR_0;
		reg &= ~GP_IO_0;
	} else {
		reg |= GP_DIR_2;
		reg &= ~GP_IO_2;
	}
	skge_write32(hw, B2_GP_IO, reg);
	skge_read32(hw, B2_GP_IO);

	xm_write16(hw, port, XM_MMU_CMD,
			xm_read16(hw, port, XM_MMU_CMD)
			& ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));

	xm_read16(hw, port, XM_MMU_CMD);
}

static void genesis_get_stats(struct skge_port *skge, u64 *data)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;
	unsigned long timeout = jiffies + HZ;

	xm_write16(hw, port,
			XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);

	/* wait for update to complete */
	while (xm_read16(hw, port, XM_STAT_CMD)
	       & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
		if (time_after(jiffies, timeout))
			break;
		udelay(10);
	}

	/* special case for 64 bit octet counter */
	data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
		| xm_read32(hw, port, XM_TXO_OK_LO);
	data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
		| xm_read32(hw, port, XM_RXO_OK_LO);

	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
		data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
}

static void genesis_mac_intr(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 status = xm_read16(hw, port, XM_ISRC);

	if (netif_msg_intr(skge))
		printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
		       skge->netdev->name, status);

	if (status & XM_IS_TXF_UR) {
		xm_write32(hw, port, XM_MODE, XM_MD_FTF);
		++skge->net_stats.tx_fifo_errors;
	}
	if (status & XM_IS_RXF_OV) {
		xm_write32(hw, port, XM_MODE, XM_MD_FRF);
		++skge->net_stats.rx_fifo_errors;
	}
}

static void genesis_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 cmd, msk;
	u32 mode;

	cmd = xm_read16(hw, port, XM_MMU_CMD);

	/*
	 * enabling pause frame reception is required for 1000BT
	 * because the XMAC is not reset if the link is going down
	 */
	if (skge->flow_control == FLOW_MODE_NONE ||
	    skge->flow_control == FLOW_MODE_LOC_SEND)
		/* Disable Pause Frame Reception */
		cmd |= XM_MMU_IGN_PF;
	else
		/* Enable Pause Frame Reception */
		cmd &= ~XM_MMU_IGN_PF;

	xm_write16(hw, port, XM_MMU_CMD, cmd);

	mode = xm_read32(hw, port, XM_MODE);
	if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
	    skge->flow_control == FLOW_MODE_LOC_SEND) {
		/*
		 * Configure Pause Frame Generation
		 * Use internal and external Pause Frame Generation.
		 * Sending pause frames is edge triggered.
		 * Send a Pause frame with the maximum pause time if
		 * internal or external FIFO full condition occurs.
		 * Send a zero pause time frame to re-start transmission.
		 */
		/* XM_PAUSE_DA = '010000C28001' (default) */
		/* XM_MAC_PTIME = 0xffff (maximum) */
		/* remember this value is defined in big endian (!) */
		xm_write16(hw, port, XM_MAC_PTIME, 0xffff);

		mode |= XM_PAUSE_MODE;
		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
	} else {
		/*
		 * disable pause frame generation is required for 1000BT
		 * because the XMAC is not reset if the link is going down
		 */
		/* Disable Pause Mode in Mode Register */
		mode &= ~XM_PAUSE_MODE;

		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
	}

	xm_write32(hw, port, XM_MODE, mode);

	msk = XM_DEF_MSK;
	/* disable GP0 interrupt bit for external Phy */
	msk |= XM_IS_INP_ASS;

	xm_write16(hw, port, XM_IMSK, msk);
	xm_read16(hw, port, XM_ISRC);

	/* get MMU Command Reg. */
	cmd = xm_read16(hw, port, XM_MMU_CMD);
	if (skge->duplex == DUPLEX_FULL)
		cmd |= XM_MMU_GMII_FD;

	/*
	 * Workaround BCOM Errata (#10523) for all BCom Phys
	 * Enable Power Management after link up
	 */
	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
		     xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
		     & ~PHY_B_AC_DIS_PM);
	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);

	/* enable Rx/Tx */
	xm_write16(hw, port, XM_MMU_CMD,
			cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
	skge_link_up(skge);
}

static inline void bcom_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 isrc;

	isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
	if (netif_msg_intr(skge))
		printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
		       skge->netdev->name, isrc);

	if (isrc & PHY_B_IS_PSE)
		printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
		       hw->dev[port]->name);

	/* Workaround BCom Errata:
	 *	enable and disable loopback mode if "NO HCD" occurs.
	 */
	if (isrc & PHY_B_IS_NO_HDCL) {
		u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
			     ctrl | PHY_CT_LOOP);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
			     ctrl & ~PHY_CT_LOOP);
	}

	if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
		bcom_check_link(hw, port);
}

static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	gma_write16(hw, port, GM_SMI_DATA, val);
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);

		if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
			return 0;
	}

	printk(KERN_WARNING PFX "%s: phy write timeout\n",
	       hw->dev[port]->name);
	return -EIO;
}

static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
	int i;

	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(hw->phy_addr)
		    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);
		if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
			goto ready;
	}

	return -ETIMEDOUT;
 ready:
	*val = gma_read16(hw, port, GM_SMI_DATA);
	return 0;
}

static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
	u16 v = 0;

	if (__gm_phy_read(hw, port, reg, &v))
		printk(KERN_WARNING PFX "%s: phy read timeout\n",
		       hw->dev[port]->name);
	return v;
}

/* Marvell Phy Initialization */
static void yukon_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv;

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			   PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	if (skge->autoneg == AUTONEG_DISABLE)
		ctrl &= ~PHY_CT_ANE;

	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	ctrl = 0;
	ct1000 = 0;
	adv = PHY_AN_CSMA;

	if (skge->autoneg == AUTONEG_ENABLE) {
		if (hw->copper) {
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (skge->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (skge->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (skge->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (skge->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;
		} else	/* special defines for FIBER (88E1011S only) */
			adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;

		/* Set Flow-control capabilities */
		adv |= phy_pause_map[skge->flow_control];

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;

		switch (skge->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			break;
		}

		ctrl |= PHY_CT_RESET;
	}

	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Enable phy interrupt on autonegotiation complete (or link up) */
	if (skge->autoneg == AUTONEG_ENABLE)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
}

static void yukon_reset(struct skge_hw *hw, int port)
{
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	gma_write16(hw, port, GM_RX_CTRL,
			 gma_read16(hw, port, GM_RX_CTRL)
			 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
}

/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
static int is_yukon_lite_a0(struct skge_hw *hw)
{
	u32 reg;
	int ret;

	if (hw->chip_id != CHIP_ID_YUKON)
		return 0;

	reg = skge_read32(hw, B2_FAR);
	skge_write8(hw, B2_FAR + 3, 0xff);
	ret = (skge_read8(hw, B2_FAR + 3) != 0);
	skge_write32(hw, B2_FAR, reg);
	return ret;
}

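/*
 * Probe note (added): a Yukon-Lite rev A0 apparently reports the plain
 * Yukon chip_id, so the helper above distinguishes it by behaviour
 * instead -- it writes 0xff to the top byte of the Flash Address
 * Register (B2_FAR), treats a non-zero readback as "Lite A0", and then
 * restores the saved register value.
 */
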
static void yukon_mac_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	int i;
	u32 reg;
	const u8 *addr = hw->dev[port]->dev_addr;

	/* WA code for COMA mode -- set PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9 | GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* hard reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* WA code for COMA mode -- clear PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9;
		reg &= ~GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* Set hardware config mode */
	reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
		GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
	reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;

	/* Clear GMC reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);

	if (skge->autoneg == AUTONEG_DISABLE) {
		reg = GM_GPCR_AU_ALL_DIS;
		gma_write16(hw, port, GM_GP_CTRL,
				 gma_read16(hw, port, GM_GP_CTRL) | reg);

		switch (skge->speed) {
		case SPEED_1000:
			reg &= ~GM_GPCR_SPEED_100;
			reg |= GM_GPCR_SPEED_1000;
			break;
		case SPEED_100:
			reg &= ~GM_GPCR_SPEED_1000;
			reg |= GM_GPCR_SPEED_100;
			break;
		case SPEED_10:
			reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
			break;
		}

		if (skge->duplex == DUPLEX_FULL)
			reg |= GM_GPCR_DUP_FULL;
	} else
		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;

	switch (skge->flow_control) {
	case FLOW_MODE_NONE:
		skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_LOC_SEND:
		/* disable Rx flow-control */
		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);
	skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));

	yukon_init(hw, port);

	/* MIB clear */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS  */
	gma_write16(hw, port, GM_RX_CTRL,
			 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
			 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
			 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
			 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));

	/* serial mode register */
	reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
	if (hw->dev[port]->mtu > 1500)
		reg |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* enable interrupt mask for counter overflows */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Initialize Mac Fifo */

	/* Configure Rx MAC FIFO */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;

	/* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (is_yukon_lite_a0(hw))
		reg &= ~GMF_RX_F_FL_ON;

	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
	/*
	 * because Pause Packet Truncation in GMAC is not working
	 * we have to increase the Flush Threshold to 64 bytes
	 * in order to flush pause packets in Rx FIFO on Yukon-1
	 */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
}

/* Go into power down mode */
static void yukon_suspend(struct skge_hw *hw, int port)
{
	u16 ctrl;

	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	ctrl |= PHY_M_PC_POL_R_DIS;
	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* switch IEEE compatible power down mode on */
	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_PDOWN;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
}

static void yukon_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
	yukon_reset(hw, port);

	gma_write16(hw, port, GM_GP_CTRL,
			 gma_read16(hw, port, GM_GP_CTRL)
			 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
	gma_read16(hw, port, GM_GP_CTRL);

	yukon_suspend(hw, port);

	/* set GPHY Control reset */
	skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
}

static void yukon_get_stats(struct skge_port *skge, u64 *data)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;

	data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
		| gma_read32(hw, port, GM_TXO_OK_LO);
	data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
		| gma_read32(hw, port, GM_RXO_OK_LO);

	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
		data[i] = gma_read32(hw, port,
				     skge_stats[i].gma_offset);
}

static void yukon_mac_intr(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	if (netif_msg_intr(skge))
		printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
		       dev->name, status);

	if (status & GM_IS_RX_FF_OR) {
		++skge->net_stats.rx_fifo_errors;
		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++skge->net_stats.tx_fifo_errors;
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}
}

static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
{
	switch (aux & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return SPEED_1000;
	case PHY_M_PS_SPEED_100:
		return SPEED_100;
	default:
		return SPEED_10;
	}
}

static void yukon_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 reg;

	/* Enable Transmit FIFO Underrun */
	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
		reg |= GM_GPCR_DUP_FULL;

	/* enable Rx/Tx */
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
	skge_link_up(skge);
}

static void yukon_link_down(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl;

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	if (skge->flow_control == FLOW_MODE_REM_SEND) {
		/* restore Asymmetric Pause bit */
		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
				  gm_phy_read(hw, port,
					      PHY_MARV_AUNE_ADV)
				  | PHY_M_AN_ASP);
	}

	yukon_reset(hw, port);
	skge_link_down(skge);

	yukon_init(hw, port);
}

static void yukon_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	const char *reason = NULL;
	u16 istatus, phystat;

	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	if (netif_msg_intr(skge))
		printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n",
		       skge->netdev->name, istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
		    & PHY_AN_RF) {
			reason = "remote fault";
			goto failed;
		}

		if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
			reason = "master/slave fault";
			goto failed;
		}

		if (!(phystat & PHY_M_PS_SPDUP_RES)) {
			reason = "speed/duplex";
			goto failed;
		}

		skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
			? DUPLEX_FULL : DUPLEX_HALF;
		skge->speed = yukon_speed(hw, phystat);

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (phystat & PHY_M_PS_PAUSE_MSK) {
		case PHY_M_PS_PAUSE_MSK:
			skge->flow_control = FLOW_MODE_SYMMETRIC;
			break;
		case PHY_M_PS_RX_P_EN:
			skge->flow_control = FLOW_MODE_REM_SEND;
			break;
		case PHY_M_PS_TX_P_EN:
			skge->flow_control = FLOW_MODE_LOC_SEND;
			break;
		default:
			skge->flow_control = FLOW_MODE_NONE;
		}

		if (skge->flow_control == FLOW_MODE_NONE ||
		    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		else
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		yukon_link_up(skge);
		return;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		skge->speed = yukon_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			yukon_link_up(skge);
		else
			yukon_link_down(skge);
	}
	return;

 failed:
	printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n",
	       skge->netdev->name, reason);

	/* XXX restart autonegotiation? */
}

static void skge_phy_reset(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	netif_stop_queue(skge->netdev);
	netif_carrier_off(skge->netdev);

	spin_lock_bh(&hw->phy_lock);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		genesis_reset(hw, port);
		genesis_mac_init(hw, port);
	} else {
		yukon_reset(hw, port);
		yukon_init(hw, port);
	}
	spin_unlock_bh(&hw->phy_lock);
}

/* Basic MII support */
static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int err = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -ENODEV;	/* Phy still in reset */

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u16 val = 0;

		spin_lock_bh(&hw->phy_lock);
		if (hw->chip_id == CHIP_ID_GENESIS)
			err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
		else
			err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
		spin_unlock_bh(&hw->phy_lock);
		data->val_out = val;
		break;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&hw->phy_lock);
		if (hw->chip_id == CHIP_ID_GENESIS)
			err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
					   data->val_in);
		else
			err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
					   data->val_in);
		spin_unlock_bh(&hw->phy_lock);
		break;
	}
	return err;
}

static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
{
	u32 end;

	/* RAM buffer registers are addressed in 8-byte units */
	start /= 8;
	len /= 8;
	end = start + len - 1;

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	skge_write32(hw, RB_ADDR(q, RB_START), start);
	skge_write32(hw, RB_ADDR(q, RB_WP), start);
	skge_write32(hw, RB_ADDR(q, RB_RP), start);
	skge_write32(hw, RB_ADDR(q, RB_END), end);

	if (q == Q_R1 || q == Q_R2) {
		/* Set thresholds on receive queue's */
		skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
			     start + (2*len)/3);
		skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
			     start + (len/3));
	} else {
		/* Enable store & forward on Tx queue's because
		 * Tx FIFO is only 4K on Genesis and 1K on Yukon
		 */
		skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
}
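/*
 * Receive queues get upper/lower threshold pointers (RB_RX_UTPP/LTPP) so
 * the RAM buffer can signal flow control before it overflows; transmit
 * queues instead run store-and-forward since the MAC transmit FIFO (4K on
 * Genesis, 1K on Yukon) is too small to stream a full-sized frame.
 */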
/* Setup Bus Memory Interface */
static void skge_qset(struct skge_port *skge, u16 q,
		      const struct skge_element *e)
{
	struct skge_hw *hw = skge->hw;
	u32 watermark = 0x600;
	u64 base = skge->dma + (e->desc - skge->mem);

	/* optimization to reduce window on 32bit/33mhz */
	if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
		watermark /= 2;

	skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
	skge_write32(hw, Q_ADDR(q, Q_F), watermark);
	skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
	skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
}
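/*
 * The whole descriptor ring sits in one pci_alloc_consistent() block, so
 * the bus address of any element is the ring's DMA base plus that
 * element's offset within the CPU mapping; Q_DA_H/Q_DA_L take the high
 * and low 32 bits of the resulting 64-bit address.
 */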
static int skge_up(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 chunk, ram_addr;
	size_t rx_size, tx_size;
	int err;

	if (netif_msg_ifup(skge))
		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);

	if (dev->mtu > RX_BUF_SIZE)
		skge->rx_buf_size = dev->mtu + ETH_HLEN;
	else
		skge->rx_buf_size = RX_BUF_SIZE;


	rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
	tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
	skge->mem_size = tx_size + rx_size;
	skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
	if (!skge->mem)
		return -ENOMEM;

	BUG_ON(skge->dma & 7);

	if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
		printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n");
		err = -EINVAL;
		goto free_pci_mem;
	}

	memset(skge->mem, 0, skge->mem_size);

	err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
	if (err)
		goto free_pci_mem;

	err = skge_rx_fill(skge);
	if (err)
		goto free_rx_ring;

	err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
			      skge->dma + rx_size);
	if (err)
		goto free_rx_ring;

	/* Initialize MAC */
	spin_lock_bh(&hw->phy_lock);
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_mac_init(hw, port);
	else
		yukon_mac_init(hw, port);
	spin_unlock_bh(&hw->phy_lock);

	/* Configure RAMbuffers */
	chunk = hw->ram_size / ((hw->ports + 1)*2);
	ram_addr = hw->ram_offset + 2 * chunk * port;

	skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
	skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);

	BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
	skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
	skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);

	/* Start receiver BMU */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
	skge_led(skge, LED_MODE_ON);

	return 0;

 free_rx_ring:
	skge_rx_clean(skge);
	kfree(skge->rx_ring.start);
 free_pci_mem:
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
	skge->mem = NULL;

	return err;
}
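/*
 * RAM buffer layout used above: the packet RAM is carved into
 * (ports + 1) * 2 chunks and each port owns two consecutive chunks,
 * receive first, transmit second.  For example, a single-port board
 * with 512K of RAM gets chunk = 128K: the rx queue at offset 0 and the
 * tx queue at offset 128K, with the remainder left unused.
 */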
static int skge_down(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (skge->mem == NULL)
		return 0;

	if (netif_msg_ifdown(skge))
		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);

	netif_stop_queue(dev);

	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_stop(skge);
	else
		yukon_stop(skge);

	/* Stop transmitter */
	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);


	/* Disable Force Sync bit and Enable Alloc bit */
	skge_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset PCI FIFO */
	skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	/* Reset the RAM Buffer async Tx queue */
	skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);

	/* Stop receiver */
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);
	skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);

	if (hw->chip_id == CHIP_ID_GENESIS) {
		skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
		skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
	} else {
		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
	}

	skge_led(skge, LED_MODE_OFF);

	skge_tx_clean(skge);
	skge_rx_clean(skge);

	kfree(skge->rx_ring.start);
	kfree(skge->tx_ring.start);
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
	skge->mem = NULL;

	return 0;
}
static inline int skge_avail(const struct skge_ring *ring)
{
	return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
		+ (ring->to_clean - ring->to_use) - 1;
}
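/*
 * Ring accounting note: one slot is always kept unused so that
 * to_use == to_clean unambiguously means "empty".  With the default
 * 128-entry transmit ring and to_use == to_clean, skge_avail()
 * therefore reports 127 free slots.
 */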
static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;
	struct skge_tx_desc *td;
	int i;
	u32 control, len;
	u64 map;

	skb = skb_padto(skb, ETH_ZLEN);
	if (!skb)
		return NETDEV_TX_OK;

	if (!spin_trylock(&skge->tx_lock)) {
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
	}

	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
			       dev->name);
		}
		spin_unlock(&skge->tx_lock);
		return NETDEV_TX_BUSY;
	}

	e = ring->to_use;
	td = e->desc;
	e->skb = skb;
	len = skb_headlen(skb);
	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
	pci_unmap_addr_set(e, mapaddr, map);
	pci_unmap_len_set(e, maplen, len);

	td->dma_lo = map;
	td->dma_hi = map >> 32;

	if (skb->ip_summed == CHECKSUM_HW) {
		int offset = skb->h.raw - skb->data;

		/* This seems backwards, but it is what the sk98lin
		 * does.  Looks like hardware is wrong?
		 */
		if (skb->h.ipiph->protocol == IPPROTO_UDP
		    && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
			control = BMU_TCP_CHECK;
		else
			control = BMU_UDP_CHECK;

		td->csum_start = offset;
		td->csum_write = offset + skb->csum;
	} else
		control = BMU_CHECK;

	if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
		control |= BMU_EOF | BMU_IRQ_EOF;
	else {
		struct skge_tx_desc *tf = td;

		control |= BMU_STFWD;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			map = pci_map_page(hw->pdev, frag->page, frag->page_offset,
					   frag->size, PCI_DMA_TODEVICE);

			e = e->next;
			tf = e->desc;
			tf->dma_lo = map;
			tf->dma_hi = (u64) map >> 32;
			pci_unmap_addr_set(e, mapaddr, map);
			pci_unmap_len_set(e, maplen, frag->size);

			tf->control = BMU_OWN | BMU_SW | control | frag->size;
		}
		tf->control |= BMU_EOF | BMU_IRQ_EOF;
	}
	/* Make sure all the descriptors written */
	wmb();
	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
	wmb();

	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);

	if (netif_msg_tx_queued(skge))
		printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
		       dev->name, e - ring->start, skb->len);

	ring->to_use = e->next;
	if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
		pr_debug("%s: transmit queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	spin_unlock(&skge->tx_lock);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
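/*
 * Ownership handoff order matters in skge_xmit_frame(): fragment
 * descriptors are marked BMU_OWN as they are filled, but the first
 * descriptor is flipped to BMU_OWN only after the barrier, so the BMU
 * never sees a partially built chain when CSR_START kicks the queue.
 */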
static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
{
	struct pci_dev *pdev = skge->hw->pdev;
	struct skge_element *e;

	for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
		struct sk_buff *skb = e->skb;
		int i;

		e->skb = NULL;
		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
				 skb_headlen(skb), PCI_DMA_TODEVICE);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			e = e->next;
			pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
		}

		dev_kfree_skb(skb);
	}
	skge->tx_ring.to_clean = e;
}
static void skge_tx_clean(struct skge_port *skge)
{
	spin_lock_bh(&skge->tx_lock);
	skge_tx_complete(skge, skge->tx_ring.to_use);
	netif_wake_queue(skge->netdev);
	spin_unlock_bh(&skge->tx_lock);
}
static void skge_tx_timeout(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	if (netif_msg_timer(skge))
		printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
	skge_tx_clean(skge);
}
static int skge_change_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

	skge_down(dev);
	dev->mtu = new_mtu;
	err = skge_up(dev);
	if (err)
		dev_close(dev);

	return err;
}
static void genesis_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i, count = dev->mc_count;
	struct dev_mc_list *list = dev->mc_list;
	u32 mode;
	u8 filter[8];

	mode = xm_read32(hw, port, XM_MODE);
	mode |= XM_MD_ENA_HASH;
	if (dev->flags & IFF_PROMISC)
		mode |= XM_MD_ENA_PROM;
	else
		mode &= ~XM_MD_ENA_PROM;

	if (dev->flags & IFF_ALLMULTI)
		memset(filter, 0xff, sizeof(filter));
	else {
		memset(filter, 0, sizeof(filter));
		for (i = 0; list && i < count; i++, list = list->next) {
			u32 crc, bit;

			crc = ether_crc_le(ETH_ALEN, list->dmi_addr);
			bit = ~crc & 0x3f;
			filter[bit/8] |= 1 << (bit%8);
		}
	}

	xm_write32(hw, port, XM_MODE, mode);
	xm_outhash(hw, port, XM_HSM, filter);
}
static void yukon_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct dev_mc_list *list = dev->mc_list;
	u16 reg;
	u8 filter[8];

	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC)		/* promiscuous */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
		memset(filter, 0xff, sizeof(filter));
	else if (dev->mc_count == 0)		/* no multicast */
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		int i;

		reg |= GM_RXCR_MCF_ENA;

		for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
			u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
			filter[bit/8] |= 1 << (bit%8);
		}
	}


	gma_write16(hw, port, GM_MC_ADDR_H1,
		    (u16)filter[0] | ((u16)filter[1] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H2,
		    (u16)filter[2] | ((u16)filter[3] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H3,
		    (u16)filter[4] | ((u16)filter[5] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H4,
		    (u16)filter[6] | ((u16)filter[7] << 8));

	gma_write16(hw, port, GM_RX_CTRL, reg);
}
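/*
 * The Yukon multicast filter is a 64-bit hash: the low six bits of
 * ether_crc() of the address select one bit of filter[].  A hash value
 * of 42, for instance, sets filter[5] |= 1 << 2, which lands in the
 * GM_MC_ADDR_H3 register half written above.
 */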
static inline u16 phy_length(const struct skge_hw *hw, u32 status)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return status >> XMR_FS_LEN_SHIFT;
	else
		return status >> GMR_FS_LEN_SHIFT;
}

static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
	else
		return (status & GMR_FS_ANY_ERR) ||
			(status & GMR_FS_RX_OK) == 0;
}
/* Get receive buffer from descriptor.
 * Handles copy of small buffers and reallocation failures
 */
static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
					  struct skge_element *e,
					  u32 control, u32 status, u16 csum)
{
	struct sk_buff *skb;
	u16 len = control & BMU_BBC;

	if (unlikely(netif_msg_rx_status(skge)))
		printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
		       skge->netdev->name, e - skge->rx_ring.start,
		       status, len);

	if (len > skge->rx_buf_size)
		goto error;

	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
		goto error;

	if (bad_phy_status(skge->hw, status))
		goto error;

	if (phy_length(skge->hw, status) != len)
		goto error;

	if (len < RX_COPY_THRESHOLD) {
		skb = alloc_skb(len + 2, GFP_ATOMIC);
		if (!skb)
			goto resubmit;

		skb_reserve(skb, 2);
		pci_dma_sync_single_for_cpu(skge->hw->pdev,
					    pci_unmap_addr(e, mapaddr),
					    len, PCI_DMA_FROMDEVICE);
		memcpy(skb->data, e->skb->data, len);
		pci_dma_sync_single_for_device(skge->hw->pdev,
					       pci_unmap_addr(e, mapaddr),
					       len, PCI_DMA_FROMDEVICE);
		skge_rx_reuse(e, skge->rx_buf_size);
	} else {
		struct sk_buff *nskb;

		nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
		if (!nskb)
			goto resubmit;

		skb_reserve(nskb, NET_IP_ALIGN);
		pci_unmap_single(skge->hw->pdev,
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = e->skb;
		prefetch(skb->data);
		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
	}

	skb_put(skb, len);
	skb->dev = skge->netdev;
	if (skge->rx_csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_HW;
	}

	skb->protocol = eth_type_trans(skb, skge->netdev);

	return skb;

error:
	if (netif_msg_rx_err(skge))
		printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
		       skge->netdev->name, e - skge->rx_ring.start,
		       control, status);

	if (skge->hw->chip_id == CHIP_ID_GENESIS) {
		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
			skge->net_stats.rx_length_errors++;
		if (status & XMR_FS_FRA_ERR)
			skge->net_stats.rx_frame_errors++;
		if (status & XMR_FS_FCS_ERR)
			skge->net_stats.rx_crc_errors++;
	} else {
		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
			skge->net_stats.rx_length_errors++;
		if (status & GMR_FS_FRAGMENT)
			skge->net_stats.rx_frame_errors++;
		if (status & GMR_FS_CRC_ERR)
			skge->net_stats.rx_crc_errors++;
	}

resubmit:
	skge_rx_reuse(e, skge->rx_buf_size);
	return NULL;
}
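/*
 * Receive strategy: frames shorter than RX_COPY_THRESHOLD (128 bytes)
 * are copied into a small skb and the original buffer is reused in
 * place; longer frames are passed up directly and the slot is refilled
 * with a freshly allocated buffer.  On allocation failure the buffer is
 * simply recycled, so the frame is dropped rather than stalling the ring.
 */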
static void skge_tx_done(struct skge_port *skge)
{
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e, *last;

	spin_lock(&skge->tx_lock);
	last = ring->to_clean;
	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;

		if (td->control & BMU_OWN)
			break;

		if (td->control & BMU_EOF) {
			last = e->next;
			if (unlikely(netif_msg_tx_done(skge)))
				printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
				       skge->netdev->name, e - ring->start);
		}
	}

	skge_tx_complete(skge, last);

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

	if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
		netif_wake_queue(skge->netdev);

	spin_unlock(&skge->tx_lock);
}
static int skge_poll(struct net_device *dev, int *budget)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;
	int to_do = min(dev->quota, *budget);
	int work_done = 0;

	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
		struct skge_rx_desc *rd = e->desc;
		struct sk_buff *skb;
		u32 control;

		rmb();
		control = rd->control;
		if (control & BMU_OWN)
			break;

		skb = skge_rx_get(skge, e, control, rd->status,
				  le16_to_cpu(rd->csum2));
		if (likely(skb)) {
			dev->last_rx = jiffies;
			netif_receive_skb(skb);

			++work_done;
		}
	}
	ring->to_clean = e;

	/* restart receiver */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= to_do)
		return 1; /* not done */

	netif_rx_complete(dev);

	hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
	skge_write32(hw, B0_IMSK, hw->intr_mask);

	return 0;
}
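/*
 * While a port is on the NAPI poll list its receiver/transmitter source
 * bits stay cleared in hw->intr_mask; only when the quota is not
 * exhausted does skge_poll() re-enable them (IS_R1_F/IS_XA1_F for port 0,
 * IS_R2_F/IS_XA2_F for port 1) and rewrite B0_IMSK.
 */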
/* Parity errors seem to happen when Genesis is connected to a switch
 * with no other ports present. Heartbeat error??
 */
static void skge_mac_parity(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];

	if (dev) {
		struct skge_port *skge = netdev_priv(dev);
		++skge->net_stats.tx_heartbeat_errors;
	}

	if (hw->chip_id == CHIP_ID_GENESIS)
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_CLR_PERR);
	else
		/* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
			    (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
			    ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
}
static void skge_mac_intr(struct skge_hw *hw, int port)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_mac_intr(hw, port);
	else
		yukon_mac_intr(hw, port);
}
/* Handle device specific framing and timeout interrupts */
static void skge_error_irq(struct skge_hw *hw)
{
	u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);

	if (hw->chip_id == CHIP_ID_GENESIS) {
		/* clear xmac errors */
		if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
			skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
		if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
			skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
	} else {
		/* Timestamp (unused) overflow */
		if (hwstatus & IS_IRQ_TIST_OV)
			skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	}

	if (hwstatus & IS_RAM_RD_PAR) {
		printk(KERN_ERR PFX "Ram read data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
	}

	if (hwstatus & IS_RAM_WR_PAR) {
		printk(KERN_ERR PFX "Ram write data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
	}

	if (hwstatus & IS_M1_PAR_ERR)
		skge_mac_parity(hw, 0);

	if (hwstatus & IS_M2_PAR_ERR)
		skge_mac_parity(hw, 1);

	if (hwstatus & IS_R1_PAR_ERR) {
		printk(KERN_ERR PFX "%s: receive queue parity error\n",
		       hw->dev[0]->name);
		skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
	}

	if (hwstatus & IS_R2_PAR_ERR) {
		printk(KERN_ERR PFX "%s: receive queue parity error\n",
		       hw->dev[1]->name);
		skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
	}

	if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
		u16 pci_status, pci_cmd;

		pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd);
		pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);

		printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n",
		       pci_name(hw->pdev), pci_cmd, pci_status);

		/* Write the error bits back to clear them. */
		pci_status &= PCI_STATUS_ERROR_BITS;
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config_word(hw->pdev, PCI_COMMAND,
				      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		pci_write_config_word(hw->pdev, PCI_STATUS, pci_status);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		/* if error still set then just ignore it */
		hwstatus = skge_read32(hw, B0_HWE_ISRC);
		if (hwstatus & IS_IRQ_STAT) {
			printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}
	}
}
/*
 * Interrupt from PHY are handled in tasklet (soft irq)
 * because accessing phy registers requires spin wait which might
 * cause excess interrupt latency.
 */
static void skge_extirq(unsigned long data)
{
	struct skge_hw *hw = (struct skge_hw *) data;
	int port;

	spin_lock(&hw->phy_lock);
	for (port = 0; port < hw->ports; port++) {
		struct net_device *dev = hw->dev[port];
		struct skge_port *skge = netdev_priv(dev);

		if (netif_running(dev)) {
			if (hw->chip_id != CHIP_ID_GENESIS)
				yukon_phy_intr(skge);
			else
				bcom_phy_intr(skge);
		}
	}
	spin_unlock(&hw->phy_lock);

	hw->intr_mask |= IS_EXT_REG;
	skge_write32(hw, B0_IMSK, hw->intr_mask);
}
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct skge_hw *hw = dev_id;
	u32 status;

	/* Reading this register masks IRQ */
	status = skge_read32(hw, B0_SP_ISRC);

	if (status & IS_EXT_REG) {
		hw->intr_mask &= ~IS_EXT_REG;
		tasklet_schedule(&hw->ext_tasklet);
	}

	if (status & (IS_R1_F|IS_XA1_F)) {
		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
		hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
		netif_rx_schedule(hw->dev[0]);
	}

	if (status & (IS_R2_F|IS_XA2_F)) {
		skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
		hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
		netif_rx_schedule(hw->dev[1]);
	}

	if (likely((status & hw->intr_mask) == 0))
		goto out;

	if (status & IS_PA_TO_RX1) {
		struct skge_port *skge = netdev_priv(hw->dev[0]);
		++skge->net_stats.rx_over_errors;
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
	}

	if (status & IS_PA_TO_RX2) {
		struct skge_port *skge = netdev_priv(hw->dev[1]);
		++skge->net_stats.rx_over_errors;
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
	}

	if (status & IS_PA_TO_TX1)
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);

	if (status & IS_PA_TO_TX2)
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);

	if (status & IS_MAC1)
		skge_mac_intr(hw, 0);

	if (status & IS_MAC2)
		skge_mac_intr(hw, 1);

	if (status & IS_HW_ERR)
		skge_error_irq(hw);

out:
	skge_write32(hw, B0_IMSK, hw->intr_mask);

	return IRQ_HANDLED;
}
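/*
 * Reading B0_SP_ISRC above masks further chip interrupts, so the handler
 * always finishes by rewriting B0_IMSK; the receiver/transmitter bits it
 * cleared for NAPI remain masked until skge_poll() restores them.
 */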
#ifdef CONFIG_NET_POLL_CONTROLLER
static void skge_netpoll(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	disable_irq(dev->irq);
	skge_intr(dev->irq, skge->hw, NULL);
	enable_irq(dev->irq);
}
#endif
static int skge_set_mac_address(struct net_device *dev, void *p)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	unsigned port = skge->port;
	const struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	spin_lock_bh(&hw->phy_lock);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	memcpy_toio(hw->regs + B2_MAC_1 + port*8,
		    dev->dev_addr, ETH_ALEN);
	memcpy_toio(hw->regs + B2_MAC_2 + port*8,
		    dev->dev_addr, ETH_ALEN);

	if (hw->chip_id == CHIP_ID_GENESIS)
		xm_outaddr(hw, port, XM_SA, dev->dev_addr);
	else {
		gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
		gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
	}
	spin_unlock_bh(&hw->phy_lock);

	return 0;
}
static const struct {
	u8 id;
	const char *name;
} skge_chips[] = {
	{ CHIP_ID_GENESIS,	"Genesis" },
	{ CHIP_ID_YUKON,	"Yukon" },
	{ CHIP_ID_YUKON_LITE,	"Yukon-Lite"},
	{ CHIP_ID_YUKON_LP,	"Yukon-LP"},
};

static const char *skge_board_name(const struct skge_hw *hw)
{
	int i;
	static char buf[16];

	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
		if (skge_chips[i].id == hw->chip_id)
			return skge_chips[i].name;

	snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
	return buf;
}
/*
 * Setup the board data structure, but don't bring up
 * the port(s)
 */
static int skge_reset(struct skge_hw *hw)
{
	u32 reg;
	u16 ctst, pci_status;
	u8 t8, mac_cfg, pmd_type, phy_type;
	int i;

	ctst = skge_read16(hw, B0_CTST);

	/* do a SW reset */
	skge_write8(hw, B0_CTST, CS_RST_SET);
	skge_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	skge_write8(hw, B2_TST_CTRL2, 0);

	pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
	pci_write_config_word(hw->pdev, PCI_STATUS,
			      pci_status | PCI_STATUS_ERROR_BITS);
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	skge_write8(hw, B0_CTST, CS_MRST_CLR);

	/* restore CLK_RUN bits (for Yukon-Lite) */
	skge_write16(hw, B0_CTST,
		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));

	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
	phy_type = skge_read8(hw, B2_E_1) & 0xf;
	pmd_type = skge_read8(hw, B2_PMD_TYP);
	hw->copper = (pmd_type == 'T' || pmd_type == '1');

	switch (hw->chip_id) {
	case CHIP_ID_GENESIS:
		switch (phy_type) {
		case SK_PHY_BCOM:
			hw->phy_addr = PHY_ADDR_BCOM;
			break;
		default:
			printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
			       pci_name(hw->pdev), phy_type);
			return -EOPNOTSUPP;
		}
		break;

	case CHIP_ID_YUKON:
	case CHIP_ID_YUKON_LITE:
	case CHIP_ID_YUKON_LP:
		if (phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
			hw->copper = 1;

		hw->phy_addr = PHY_ADDR_MARV;
		break;

	default:
		printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
		       pci_name(hw->pdev), hw->chip_id);
		return -EOPNOTSUPP;
	}

	mac_cfg = skge_read8(hw, B2_MAC_CFG);
	hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
	hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;

	/* read the adapters RAM size */
	t8 = skge_read8(hw, B2_E_0);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		if (t8 == 3) {
			/* special case: 4 x 64k x 36, offset = 0x80000 */
			hw->ram_size = 0x100000;
			hw->ram_offset = 0x80000;
		} else
			hw->ram_size = t8 * 512;
	} else if (t8 == 0)
		hw->ram_size = 0x20000;
	else
		hw->ram_size = t8 * 4096;

	hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
	if (hw->ports > 1)
		hw->intr_mask |= IS_PORT_2;

	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_init(hw);
	else {
		/* switch power to VCC (WA for VAUX problem) */
		skge_write8(hw, B0_POWER_CTRL,
			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

		/* avoid boards with stuck Hardware error bits */
		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
			printk(KERN_WARNING PFX "stuck hardware sensor bit\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}

		/* Clear PHY COMA */
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
		reg &= ~PCI_PHY_COMA;
		pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);


		for (i = 0; i < hw->ports; i++) {
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
		}
	}

	/* turn off hardware timer (unused) */
	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
	skge_write8(hw, B0_LED, LED_STAT_ON);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);

	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);

	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);

	/* Set interrupt moderation for Transmit only
	 * Receive interrupts avoided by NAPI
	 */
	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
	skge_write32(hw, B2_IRQM_CTRL, TIM_START);

	skge_write32(hw, B0_IMSK, hw->intr_mask);

	spin_lock_bh(&hw->phy_lock);
	for (i = 0; i < hw->ports; i++) {
		if (hw->chip_id == CHIP_ID_GENESIS)
			genesis_reset(hw, i);
		else
			yukon_reset(hw, i);
	}
	spin_unlock_bh(&hw->phy_lock);

	return 0;
}
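/*
 * Interrupt moderation above is applied to transmit completions only
 * (IS_XA1_F|IS_XA2_F) with a 100 usec timer; receive interrupts are left
 * unmoderated because NAPI already batches receive processing.
 */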
/* Initialize network device */
static struct net_device *skge_devinit(struct skge_hw *hw, int port,
				       int highmem)
{
	struct skge_port *skge;
	struct net_device *dev = alloc_etherdev(sizeof(*skge));

	if (!dev) {
		printk(KERN_ERR "skge etherdev alloc failed");
		return NULL;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->open = skge_up;
	dev->stop = skge_down;
	dev->do_ioctl = skge_ioctl;
	dev->hard_start_xmit = skge_xmit_frame;
	dev->get_stats = skge_get_stats;
	if (hw->chip_id == CHIP_ID_GENESIS)
		dev->set_multicast_list = genesis_set_multicast;
	else
		dev->set_multicast_list = yukon_set_multicast;

	dev->set_mac_address = skge_set_mac_address;
	dev->change_mtu = skge_change_mtu;
	SET_ETHTOOL_OPS(dev, &skge_ethtool_ops);
	dev->tx_timeout = skge_tx_timeout;
	dev->watchdog_timeo = TX_WATCHDOG;
	dev->poll = skge_poll;
	dev->weight = NAPI_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = skge_netpoll;
#endif
	dev->irq = hw->pdev->irq;
	dev->features = NETIF_F_LLTX;
	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

	skge = netdev_priv(dev);
	skge->netdev = dev;
	skge->hw = hw;
	skge->msg_enable = netif_msg_init(debug, default_msg);
	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;

	/* Auto speed and flow control */
	skge->autoneg = AUTONEG_ENABLE;
	skge->flow_control = FLOW_MODE_SYMMETRIC;
	skge->duplex = -1;
	skge->speed = -1;
	skge->advertising = skge_supported_modes(hw);

	hw->dev[port] = dev;

	skge->port = port;

	spin_lock_init(&skge->tx_lock);

	if (hw->chip_id != CHIP_ID_GENESIS) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		skge->rx_csum = 1;
	}

	/* read the mac address */
	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* device is off until link detection */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	return dev;
}
static void __devinit skge_show_addr(struct net_device *dev)
{
	const struct skge_port *skge = netdev_priv(dev);

	if (netif_msg_probe(skge))
		printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
		       dev->name,
		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}
static int __devinit skge_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *dev, *dev1;
	struct skge_hw *hw;
	int err, using_dac = 0;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

#ifdef __BIG_ENDIAN
	/* byte swap descriptors in hardware */
	{
		u32 reg;

		pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
		reg |= PCI_REV_DESC;
		pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
	}
#endif

	err = -ENOMEM;
	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw) {
		printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

	hw->pdev = pdev;
	spin_lock_init(&hw->phy_lock);
	tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);

	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		goto err_out_free_hw;
	}

	err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
		       pci_name(pdev), pdev->irq);
		goto err_out_iounmap;
	}
	pci_set_drvdata(pdev, hw);

	err = skge_reset(hw);
	if (err)
		goto err_out_free_irq;

	printk(KERN_INFO PFX DRV_VERSION " addr 0x%lx irq %d chip %s rev %d\n",
	       pci_resource_start(pdev, 0), pdev->irq,
	       skge_board_name(hw), hw->chip_rev);

	if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
		goto err_out_led_off;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_free_netdev;
	}

	skge_show_addr(dev);

	if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
		if (register_netdev(dev1) == 0)
			skge_show_addr(dev1);
		else {
			/* Failure to register second port need not be fatal */
			printk(KERN_WARNING PFX "register of second port failed\n");
			hw->dev[1] = NULL;
			free_netdev(dev1);
		}
	}

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out_led_off:
	skge_write16(hw, B0_LED, LED_STAT_OFF);
err_out_free_irq:
	free_irq(pdev->irq, hw);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}
static void __devexit skge_remove(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	if ((dev1 = hw->dev[1]))
		unregister_netdev(dev1);
	dev0 = hw->dev[0];
	unregister_netdev(dev0);

	skge_write32(hw, B0_IMSK, 0);
	skge_write16(hw, B0_LED, LED_STAT_OFF);
	skge_write8(hw, B0_CTST, CS_RST_SET);

	tasklet_kill(&hw->ext_tasklet);

	free_irq(pdev->irq, hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	if (dev1)
		free_netdev(dev1);
	free_netdev(dev0);

	iounmap(hw->regs);
	kfree(hw);
	pci_set_drvdata(pdev, NULL);
}
static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i, wol = 0;

	for (i = 0; i < 2; i++) {
		struct net_device *dev = hw->dev[i];

		if (dev) {
			struct skge_port *skge = netdev_priv(dev);

			if (netif_running(dev)) {
				netif_carrier_off(dev);
				if (skge->wol)
					netif_stop_queue(dev);
				else
					skge_down(dev);
			}
			netif_device_detach(dev);
			wol |= skge->wol;
		}
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int skge_resume(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	skge_reset(hw);

	for (i = 0; i < 2; i++) {
		struct net_device *dev = hw->dev[i];

		if (dev) {
			netif_device_attach(dev);
			if (netif_running(dev) && skge_up(dev))
				dev_close(dev);
		}
	}
	return 0;
}
static struct pci_driver skge_driver = {
	.name =		DRV_NAME,
	.id_table =	skge_id_table,
	.probe =	skge_probe,
	.remove =	__devexit_p(skge_remove),
#ifdef CONFIG_PM
	.suspend =	skge_suspend,
	.resume =	skge_resume,
#endif
};

static int __init skge_init_module(void)
{
	return pci_module_init(&skge_driver);
}

static void __exit skge_cleanup_module(void)
{
	pci_unregister_driver(&skge_driver);
}

module_init(skge_init_module);
module_exit(skge_cleanup_module);