/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 *
 * TODO:
 *  - Now that the driver was significantly simplified, I need to rework
 *    the locking. I'm sure we don't need _2_ spinlocks, and we probably
 *    can avoid holding most of them for such long periods of time (and
 *    schedule instead). The main issues at this point are caused by the
 *    netdev layer though:
 *
 *    gem_change_mtu() and gem_set_multicast() are called with a read_lock()
 *    held by net/core/dev.c, thus they can't schedule. That means they can't
 *    call napi_disable() either, which forces gem_poll() to keep a spinlock
 *    where it could have been dropped. change_mtu especially would also love
 *    to be able to msleep instead of horrid locked delays when resetting the
 *    HW, but that read_lock() makes it impossible, unless I defer its action
 *    to the reset task, which means it'll be asynchronous (won't take effect
 *    until the system schedules a bit).
 *
 *    Also, it would probably be possible to remove most of the long-life
 *    locking in the open/resume code path (gem_reinit_chip) by being more
 *    careful about when we can start taking interrupts or get xmit() called...
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include "sungem_phy.h"
#include "sungem.h"

/* Stripping FCS is causing problems, disabled for now */
#undef STRIP_FCS

#define DEFAULT_MSG	(NETIF_MSG_DRV		| \
			 NETIF_MSG_PROBE	| \
			 NETIF_MSG_LINK)

#define ADVERTISE_MASK	(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
			 SUPPORTED_Pause | SUPPORTED_Autoneg)

#define DRV_NAME	"sungem"
#define DRV_VERSION	"0.98"
#define DRV_RELDATE	"8/24/03"
#define DRV_AUTHOR	"David S. Miller (davem@redhat.com)"

static char version[] __devinitdata =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME	"gem"
#define PFX GEM_MODULE_NAME ": "

static struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);

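/* PHY access goes through the MIF frame register. The helpers below build
 * an IEEE 802.3 clause 22 MDIO frame: start code 01 in bits 31:30, opcode
 * in bits 29:28 (10 = read, 01 = write), then the PHY and register
 * addresses, turnaround bits and 16 data bits. Completion is signalled by
 * the PHY driving the turnaround LSB (MIF_FRAME_TALSB), which we poll for
 * up to roughly 100ms.
 */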
static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (--limit) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}

static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = netdev_priv(dev);
	return __phy_read(gp, mii_id, reg);
}

static inline u16 phy_read(struct gem *gp, int reg)
{
	return __phy_read(gp, gp->mii_phy_addr, reg);
}

static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}

static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = netdev_priv(dev);
	__phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void phy_write(struct gem *gp, int reg, u16 val)
{
	__phy_write(gp, gp->mii_phy_addr, reg, val);
}

static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

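/* Turn on the chip's clock. The enable is refcounted; on PowerMac the
 * GMAC cell is actually powered up through the platform feature call when
 * the first user appears, elsewhere only the refcount is tracked.
 */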
static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}

static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		printk(KERN_ERR "%s: PCS irq but no link status change???\n",
		       dev->name);
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			printk(KERN_INFO "%s: PCS AutoNEG complete, "
			       "RemoteFault\n", dev->name);
		else
			printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
			       dev->name);
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		printk(KERN_INFO "%s: PCS link is now up.\n",
		       dev->name);
		netif_carrier_on(gp->dev);
	} else {
		printk(KERN_INFO "%s: PCS link is now down.\n",
		       dev->name);
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}

static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	/* Deferred timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
		       dev->name);
		gp->net_stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
		       dev->name);
		gp->net_stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TXSTAT_NCE)
		gp->net_stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}

/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	/* First, reset & disable MAC RX. */
	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	udelay(5000);

	/* Execute RX reset command. */
	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX reset command will not execute, resetting "
		       "whole chip.\n", dev->name);
		return 1;
	}

	/* Refresh the RX ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
			       "whole chip.\n", dev->name);
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;

	/* Now we must reprogram the rest of RX unit. */
	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}

static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
		       dev->name, smac);
		gp->net_stats.rx_over_errors++;
		gp->net_stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		gp->net_stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		gp->net_stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		gp->net_stats.rx_length_errors += 0x10000;

	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
	 * events.
	 */
	return ret;
}

static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	/* This interrupt is just for pause frame and pause
	 * tracking. It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}

static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		printk(KERN_ERR "%s: PCI error [%04x] ",
		       dev->name, pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			printk("<No ACK64# during ABS64 cycle> ");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			printk("<Delayed transaction timeout> ");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			printk("<other>");
		printk("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		printk(KERN_ERR "%s: PCI error\n", dev->name);
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		u16 pci_cfg_stat;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(gp->pdev, PCI_STATUS,
				     &pci_cfg_stat);
		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
		       dev->name, pci_cfg_stat);
		if (pci_cfg_stat & PCI_STATUS_PARITY)
			printk(KERN_ERR "%s: PCI parity error detected.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI master acks target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
			printk(KERN_ERR "%s: PCI master abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
			printk(KERN_ERR "%s: PCI system error SERR#.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
			printk(KERN_ERR "%s: PCI parity error.\n",
			       dev->name);

		/* Write the error bits back to clear them. */
		pci_cfg_stat &= (PCI_STATUS_PARITY |
				 PCI_STATUS_SIG_TARGET_ABORT |
				 PCI_STATUS_REC_TARGET_ABORT |
				 PCI_STATUS_REC_MASTER_ABORT |
				 PCI_STATUS_SIG_SYSTEM_ERROR |
				 PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(gp->pdev,
				      PCI_STATUS, pci_cfg_stat);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		gp->net_stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		gp->net_stats.rx_errors++;

		goto do_reset;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	return 0;

do_reset:
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	return 1;
}

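/* Reclaim TX descriptors the chip has completed: walk the ring from tx_old
 * up to the completion index reported in gem_status, unmap each fragment,
 * free the skb, and wake the queue once enough slots are free again. A
 * fragmented skb is only freed when all of its descriptors have completed.
 */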
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
			gp->dev->name, gem_status);

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		gp->net_stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		gp->net_stats.tx_packets++;
		dev_kfree_skb_irq(skb);
	}
	gp->tx_old = entry;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
}

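/* Return fresh RX descriptors to the chip. Descriptors are rearmed in
 * aligned clusters of four, and the RXDMA kick register only advanced on
 * cluster boundaries (matching the RX_RING_SIZE - 4 value programmed at
 * init time), which is presumably what the hardware expects.
 */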
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}

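/* RX path. Frames larger than RX_COPY_THRESHOLD hand the ring buffer to
 * the stack and replace it with a freshly allocated skb; smaller frames
 * are copied into a small skb so the large ring buffer can be reused in
 * place.
 */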
static int gem_rx(struct gem *gp, int work_to_do)
{
	int entry, drops, work_done = 0;
	u32 done;
	__sum16 csum;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = le64_to_cpu(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address. We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			gp->net_stats.rx_errors++;
			if (len < ETH_ZLEN)
				gp->net_stats.rx_length_errors++;
			if (status & RXDCTRL_BAD)
				gp->net_stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			gp->net_stats.rx_dropped++;
			goto next;
		}

		dma_addr = le64_to_cpu(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			gp->rx_skbs[entry] = new_skb;
			new_skb->dev = gp->dev;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       PCI_DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
		skb->csum = csum_unfold(csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->protocol = eth_type_trans(skb, gp->dev);

		netif_receive_skb(skb);

		gp->net_stats.rx_packets++;
		gp->net_stats.rx_bytes += len;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       gp->dev->name);

	return work_done;
}

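/* NAPI poll: service abnormal interrupts and TX completion under the
 * locks, run RX unlocked, and keep looping while the chip reports further
 * NAPI-relevant events; interrupts are only re-enabled once we are fully
 * done or the budget is exhausted.
 */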
static int gem_poll(struct napi_struct *napi, int budget)
{
	struct gem *gp = container_of(napi, struct gem, napi);
	struct net_device *dev = gp->dev;
	unsigned long flags;
	int work_done;

	/*
	 * NAPI locking nightmare: See comment at head of driver
	 */
	spin_lock_irqsave(&gp->lock, flags);

	work_done = 0;
	do {
		/* Handle anomalies */
		if (gp->status & GREG_STAT_ABNORMAL) {
			if (gem_abnormal_irq(dev, gp, gp->status))
				break;
		}

		/* Run TX completion thread */
		spin_lock(&gp->tx_lock);
		gem_tx(dev, gp, gp->status);
		spin_unlock(&gp->tx_lock);

		spin_unlock_irqrestore(&gp->lock, flags);

		/* Run RX thread. We don't use any locking here,
		 * code willing to do bad things - like cleaning the
		 * rx ring - must call napi_disable(), which
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_done += gem_rx(gp, budget - work_done);

		if (work_done >= budget)
			return work_done;

		spin_lock_irqsave(&gp->lock, flags);

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	__napi_complete(napi);
	gem_enable_ints(gp);

	spin_unlock_irqrestore(&gp->lock, flags);

	return work_done;
}

static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	/* Swallow interrupts when shutting the chip down, though
	 * that shouldn't happen, we should have done free_irq() at
	 * this point...
	 */
	if (!gp->running)
		return IRQ_HANDLED;

	spin_lock_irqsave(&gp->lock, flags);

	if (napi_schedule_prep(&gp->napi)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (gem_status == 0) {
			napi_enable(&gp->napi);
			spin_unlock_irqrestore(&gp->lock, flags);
			return IRQ_NONE;
		}
		gp->status = gem_status;
		gem_disable_ints(gp);
		__napi_schedule(&gp->napi);
	}

	spin_unlock_irqrestore(&gp->lock, flags);

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	/* gem_interrupt is safe to be called reentrantly,
	 * so there is no need to disable_irq here.
	 */
	gem_interrupt(dev->irq, dev);
}
#endif

static void gem_tx_timeout(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	if (!gp->running) {
		printk("%s: hrm.. hw not running !\n", dev->name);
		return;
	}
	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + TXDMA_CFG),
	       readl(gp->regs + MAC_TXSTAT),
	       readl(gp->regs + MAC_TXCFG));
	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + RXDMA_CFG),
	       readl(gp->regs + MAC_RXSTAT),
	       readl(gp->regs + MAC_RXCFG));

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}

static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}

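/* The xmit path avoids sleeping on tx_lock: it trylocks and returns
 * NETDEV_TX_LOCKED on contention (lockless-TX style), so the core
 * requeues the packet instead of spinning here.
 */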
static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int entry;
	u64 ctrl;
	unsigned long flags;

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_transport_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}

	local_irq_save(flags);
	if (!spin_trylock(&gp->tx_lock)) {
		/* Tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}
	/* We raced with gem_do_stop() */
	if (!gp->running) {
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = pci_map_page(gp->pdev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, PCI_DMA_TODEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		wmb();
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, PCI_DMA_TODEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = this_frag->size;
			mapping = pci_map_page(gp->pdev,
					       this_frag->page,
					       this_frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			wmb();
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	mb();
	writel(gp->tx_new, gp->regs + TXDMA_KICK);
	spin_unlock_irqrestore(&gp->tx_lock, flags);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

static void gem_pcs_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Reset PCS unit. */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= PCS_MIICTRL_RST;
	writel(val, gp->regs + PCS_MIICTRL);

	limit = 32;
	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
		udelay(100);
		if (limit-- <= 0)
			break;
	}
	if (limit < 0)
		printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
		       gp->dev->name);
}

static void gem_pcs_reinit_adv(struct gem *gp)
{
	u32 val;

	/* Make sure PCS is disabled while changing advertisement
	 * configuration.
	 */
	val = readl(gp->regs + PCS_CFG);
	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
	writel(val, gp->regs + PCS_CFG);

	/* Advertise all capabilities except asymmetric
	 * pause.
	 */
	val = readl(gp->regs + PCS_MIIADV);
	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
		PCS_MIIADV_SP | PCS_MIIADV_AP);
	writel(val, gp->regs + PCS_MIIADV);

	/* Enable and restart auto-negotiation, disable wrapback/loopback,
	 * and re-enable PCS.
	 */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
	val &= ~PCS_MIICTRL_WB;
	writel(val, gp->regs + PCS_MIICTRL);

	val = readl(gp->regs + PCS_CFG);
	val |= PCS_CFG_ENABLE;
	writel(val, gp->regs + PCS_CFG);

	/* Make sure serialink loopback is off. The meaning
	 * of this bit is logically inverted based upon whether
	 * you are in Serialink or SERDES mode.
	 */
	val = readl(gp->regs + PCS_SCTRL);
	if (gp->phy_type == phy_serialink)
		val &= ~PCS_SCTRL_LOOP;
	else
		val |= PCS_SCTRL_LOOP;
	writel(val, gp->regs + PCS_SCTRL);
}

#define STOP_TRIES 32

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit < 0)
		printk(KERN_ERR "%s: SW reset failed to complete.\n", gp->dev->name);

	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
		gem_pcs_reinit_adv(gp);
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}

/* Must be invoked under gp->lock and gp->tx_lock. DMA won't actually
 * be stopped before about 4ms though ...
 */
static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	/* We are done rocking, turn everything off. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);

	/* Need to wait a bit ... done by the caller */
}

/* Must be invoked under gp->lock and gp->tx_lock. */
// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;

	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->autoneg == AUTONEG_ENABLE) {
		advertise = ep->advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ep->speed;
		duplex = ep->duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
	if (gp->asleep) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under gp->lock and gp->tx_lock.
 */
static int gem_set_link_modes(struct gem *gp)
{
	u32 val;
	int full_duplex, speed, pause;

	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	if (netif_msg_link(gp))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
			gp->dev->name, speed, (full_duplex ? "full" : "half"));

	if (!gp->running)
		return 0;

	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex) {
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	/* If gigabit and half-duplex, enable carrier extension
	 * mode. Else, disable it.
	 */
	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}

	if (netif_msg_link(gp)) {
		if (pause) {
			printk(KERN_INFO "%s: Pause is enabled "
			       "(rxfifo: %d off: %d on: %d)\n",
			       gp->dev->name,
			       gp->rx_fifo_sz,
			       gp->rx_pause_off,
			       gp->rx_pause_on);
		} else {
			printk(KERN_INFO "%s: Pause is disabled\n",
			       gp->dev->name);
		}
	}

	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	return 0;
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static int gem_mdio_link_not_up(struct gem *gp)
{
	switch (gp->lstate) {
	case link_force_ret:
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: Autoneg failed again, keeping"
				" forced mode\n", gp->dev->name);
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
			gp->last_forced_speed, DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_ok;
		return 0;
	case link_aneg:
		/* We try forced modes after a failed aneg only on PHYs that don't
		 * have the "magic_aneg" bit set, which means they internally do
		 * the whole forced-mode fallback themselves. On these, we just
		 * restart aneg
		 */
		if (gp->phy_mii.def->magic_aneg)
			return 1;
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: switching to forced 100bt\n",
				gp->dev->name);
		/* Try forced modes. */
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
			DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_try;
		return 0;
	case link_force_try:
		/* Downgrade from 100 to 10 Mbps if necessary.
		 * If already at 10Mbps, warn user about the
		 * situation every 10 ticks.
		 */
		if (gp->phy_mii.speed == SPEED_100) {
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
				DUPLEX_HALF);
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: switching to forced 10bt\n",
					gp->dev->name);
			return 0;
		} else
			return 1;
	default:
		return 0;
	}
}

static void gem_link_timer(unsigned long data)
{
	struct gem *gp = (struct gem *) data;
	int restart_aneg = 0;

	if (gp->asleep)
		return;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);
	gem_get_cell(gp);

	/* If the reset task is still pending, we just
	 * reschedule the link timer
	 */
	if (gp->reset_task_pending)
		goto restart;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			if (gp->lstate == link_up)
				goto restart;

			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}
	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Got link after fallback, retrying"
					" autoneg once...\n", gp->dev->name);
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		/* If the link was previously up, we restart the
		 * whole process
		 */
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Link down\n",
					gp->dev->name);
			netif_carrier_off(gp->dev);
			gp->reset_task_pending = 1;
			schedule_work(&gp->reset_task);
			restart_aneg = 1;
		} else if (++gp->timer_ticks > 10) {
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		gem_begin_auto_negotiation(gp, NULL);
		goto out_unlock;
	}
restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
out_unlock:
	gem_put_cell(gp);
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			dma_addr = le64_to_cpu(rxd->buffer);
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		rxd->status_word = 0;
		wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				pci_unmap_page(gp->pdev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	gem_clean_rings(gp);

	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
		if (!skb) {
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb->dev = dev;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = pci_map_page(gp->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					PCI_DMA_FROMDEVICE);
		rxd->buffer = cpu_to_le64(dma_addr);
		wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		txd->control_word = 0;
		wmb();
		txd->buffer = 0;
	}
	wmb();
}

/* Init PHY interface and start link poll state machine */
static void gem_init_phy(struct gem *gp)
{
	u32 mifcfg;

	/* Revert MIF CFG setting done on stop_phy */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

		/* Those delays suck, but the HW seems to love them; I'll
		 * seriously consider breaking some locks here to be able
		 * to schedule instead
		 */
		for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
			msleep(20);
#endif
			/* Some PHYs used by apple have problems getting back
			 * to us, we do an additional reset here
			 */
			phy_write(gp, MII_BMCR, BMCR_RESET);
			msleep(20);
			if (phy_read(gp, MII_BMCR) != 0xffff)
				break;
			if (i == 2)
				printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
				       gp->dev->name);
		}
	}

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		u32 val;

		/* Init datapath mode register. */
		if (gp->phy_type == phy_mii_mdio0 ||
		    gp->phy_type == phy_mii_mdio1) {
			val = PCS_DMODE_MGM;
		} else if (gp->phy_type == phy_serialink) {
			val = PCS_DMODE_SM | PCS_DMODE_GMOE;
		} else {
			val = PCS_DMODE_ESM;
		}

		writel(val, gp->regs + PCS_DMODE);
	}

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		// XXX check for errors
		mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);

		/* Init PHY */
		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
			gp->phy_mii.def->ops->init(&gp->phy_mii);
	} else {
		gem_pcs_reset(gp);
		gem_pcs_reinit_adv(gp);
	}

	/* Default aneg parameters */
	gp->timer_ticks = 0;
	gp->lstate = link_down;
	netif_carrier_off(gp->dev);

	/* Can I advertise gigabit here ? I'd need BCM PHY docs... */
	spin_lock_irq(&gp->lock);
	gem_begin_auto_negotiation(gp, NULL);
	spin_unlock_irq(&gp->lock);
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_dma(struct gem *gp)
{
	u64 desc_dma = (u64) gp->gblock_dvma;
	u32 val;

	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
	writel(val, gp->regs + TXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

	writel(0, gp->regs + TXDMA_KICK);

	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);

	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);

	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
}

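/* Compute the RX MAC filter setup: ALLMULTI (or more than 256 addresses)
 * sets every bit of the 256-bit hash filter; promiscuous mode uses the
 * PROM bit instead; otherwise each multicast address selects one hash bit
 * via the top 8 bits of its little-endian CRC (16 registers of 16 bits).
 */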
/* Must be invoked under gp->lock and gp->tx_lock. */
static u32 gem_setup_multicast(struct gem *gp)
{
	u32 rxcfg = 0;
	int i;

	if ((gp->dev->flags & IFF_ALLMULTI) ||
	    (gp->dev->mc_count > 256)) {
		for (i=0; i<16; i++)
			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	} else if (gp->dev->flags & IFF_PROMISC) {
		rxcfg |= MAC_RXCFG_PROM;
	} else {
		u16 hash_table[16];
		u32 crc;
		struct dev_mc_list *dmi = gp->dev->mc_list;
		int i;

		for (i = 0; i < 16; i++)
			hash_table[i] = 0;

		for (i = 0; i < gp->dev->mc_count; i++) {
			char *addrs = dmi->dmi_addr;

			dmi = dmi->next;

			if (!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		for (i=0; i<16; i++)
			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	}

	return rxcfg;
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_mac(struct gem *gp)
{
	unsigned char *e = &gp->dev->dev_addr[0];

	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	/* Ethernet payload + header + FCS + optional VLAN tag. */
	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);

	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);

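	/* MAC_ADDR6..8 hold the 802.3x flow-control multicast address
	 * 01:80:c2:00:00:01, stored low word first like ADDR0..2 above.
	 */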
	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);

	gp->mac_rx_cfg = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
#endif

	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);

	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
	 * them once a link is established.
	 */
	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);

	/* Setup MAC interrupts. We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, gp->regs + MAC_MCMASK);

	/* Don't enable GEM's WOL in normal operations
	 */
	if (gp->has_wol)
		writel(0, gp->regs + WOL_WAKECSR);
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_pause_thresholds(struct gem *gp)
{
	u32 cfg;

	/* Calculate pause thresholds. Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation which
	 * is what we do for 10/100 only GEMs which have FIFOs too small
	 * to make real gains from PAUSE.
	 */
	if (gp->rx_fifo_sz <= (2 * 1024)) {
		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
	} else {
		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
		int off = (gp->rx_fifo_sz - (max_frame * 2));
		int on = off - max_frame;

		gp->rx_pause_off = off;
		gp->rx_pause_on = on;
	}

	/* Configure the chip "burst" DMA mode & enable some
	 * HW bug fixes on Apple version
	 */
	cfg = 0;
	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	cfg |= GREG_CFG_IBURST;
#endif
	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
	writel(cfg, gp->regs + GREG_CFG);

	/* If Infinite Burst didn't stick, then use different
	 * thresholds (and Apple bug fixes don't exist)
	 */
	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}

2004 static int gem_check_invariants(struct gem *gp)
2006 struct pci_dev *pdev = gp->pdev;
2007 u32 mif_cfg;
2009 /* On Apple's sungem, we can't rely on registers as the chip
2010 * was been powered down by the firmware. The PHY is looked
2011 * up later on.
2013 if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
2014 gp->phy_type = phy_mii_mdio0;
2015 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
2016 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
2017 gp->swrst_base = 0;
2019 mif_cfg = readl(gp->regs + MIF_CFG);
2020 mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
2021 mif_cfg |= MIF_CFG_MDI0;
2022 writel(mif_cfg, gp->regs + MIF_CFG);
2023 writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
2024 writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
2026 /* We hard-code the PHY address so we can properly bring it out of
2027 * reset later on, we can't really probe it at this point, though
2028 * that isn't an issue.
2030 if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
2031 gp->mii_phy_addr = 1;
2032 else
2033 gp->mii_phy_addr = 0;
2035 return 0;
2038 mif_cfg = readl(gp->regs + MIF_CFG);
2040 if (pdev->vendor == PCI_VENDOR_ID_SUN &&
2041 pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
2042 /* One of the MII PHYs _must_ be present
2043 * as this chip has no gigabit PHY.
2045 if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
2046 printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
2047 mif_cfg);
2048 return -1;
2052 /* Determine initial PHY interface type guess. MDIO1 is the
2053 * external PHY and thus takes precedence over MDIO0.
2056 if (mif_cfg & MIF_CFG_MDI1) {
2057 gp->phy_type = phy_mii_mdio1;
2058 mif_cfg |= MIF_CFG_PSELECT;
2059 writel(mif_cfg, gp->regs + MIF_CFG);
2060 } else if (mif_cfg & MIF_CFG_MDI0) {
2061 gp->phy_type = phy_mii_mdio0;
2062 mif_cfg &= ~MIF_CFG_PSELECT;
2063 writel(mif_cfg, gp->regs + MIF_CFG);
2064 } else {
2065 gp->phy_type = phy_serialink;
2067 if (gp->phy_type == phy_mii_mdio1 ||
2068 gp->phy_type == phy_mii_mdio0) {
2069 int i;
2071 for (i = 0; i < 32; i++) {
2072 gp->mii_phy_addr = i;
2073 if (phy_read(gp, MII_BMCR) != 0xffff)
2074 break;
2076 if (i == 32) {
2077 if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
2078 printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
2079 return -1;
2081 gp->phy_type = phy_serdes;
2085 /* Fetch the FIFO configurations now too. */
2086 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
2087 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
2089 if (pdev->vendor == PCI_VENDOR_ID_SUN) {
2090 if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
2091 if (gp->tx_fifo_sz != (9 * 1024) ||
2092 gp->rx_fifo_sz != (20 * 1024)) {
2093 printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2094 gp->tx_fifo_sz, gp->rx_fifo_sz);
2095 return -1;
2097 gp->swrst_base = 0;
2098 } else {
2099 if (gp->tx_fifo_sz != (2 * 1024) ||
2100 gp->rx_fifo_sz != (2 * 1024)) {
2101 printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2102 gp->tx_fifo_sz, gp->rx_fifo_sz);
2103 return -1;
2105 gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
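2106 /* The cache line parameter appears to be in 32-bit words,
2107 * hence (64 / 4) for a 64-byte cache line. */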
2109 return 0;
2112 /* Must be invoked under gp->lock and gp->tx_lock. */
2113 static void gem_reinit_chip(struct gem *gp)
2115 /* Reset the chip */
2116 gem_reset(gp);
2118 /* Make sure ints are disabled */
2119 gem_disable_ints(gp);
2121 /* Allocate & setup ring buffers */
2122 gem_init_rings(gp);
2124 /* Configure pause thresholds */
2125 gem_init_pause_thresholds(gp);
2127 /* Init DMA & MAC engines */
2128 gem_init_dma(gp);
2129 gem_init_mac(gp);
2133 /* Must be invoked with no lock held. */
2134 static void gem_stop_phy(struct gem *gp, int wol)
2136 u32 mifcfg;
2137 unsigned long flags;
2139 /* Let the chip settle down a bit; it seems that helps
2140 * for sleep mode on some models
2142 msleep(10);
2144 /* Make sure we aren't polling for PHY status changes. We
2145 * don't currently use that feature though
2147 mifcfg = readl(gp->regs + MIF_CFG);
2148 mifcfg &= ~MIF_CFG_POLL;
2149 writel(mifcfg, gp->regs + MIF_CFG);
2151 if (wol && gp->has_wol) {
2152 unsigned char *e = &gp->dev->dev_addr[0];
2153 u32 csr;
2155 /* Setup wake-on-lan for MAGIC packet */
2156 writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
2157 gp->regs + MAC_RXCFG);
2158 writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
2159 writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
2160 writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
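2161 /* e.g. 08:00:20:ab:cd:ef gives MATCH0=0xcdef, MATCH1=0x20ab, MATCH2=0x0800 */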
2162 writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
2163 csr = WOL_WAKECSR_ENABLE;
2164 if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
2165 csr |= WOL_WAKECSR_MII;
2166 writel(csr, gp->regs + WOL_WAKECSR);
2167 } else {
2168 writel(0, gp->regs + MAC_RXCFG);
2169 (void)readl(gp->regs + MAC_RXCFG);
2170 /* Machine sleep will die in strange ways if we
2171 * don't wait a bit here; it looks like the chip takes
2172 * some time to really shut down
2174 msleep(10);
2177 writel(0, gp->regs + MAC_TXCFG);
2178 writel(0, gp->regs + MAC_XIFCFG);
2179 writel(0, gp->regs + TXDMA_CFG);
2180 writel(0, gp->regs + RXDMA_CFG);
2182 if (!wol) {
2183 spin_lock_irqsave(&gp->lock, flags);
2184 spin_lock(&gp->tx_lock);
2185 gem_reset(gp);
2186 writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
2187 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
2188 spin_unlock(&gp->tx_lock);
2189 spin_unlock_irqrestore(&gp->lock, flags);
2191 /* No need to take the lock here */
2193 if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
2194 gp->phy_mii.def->ops->suspend(&gp->phy_mii);
2196 /* According to Apple, we must set the MDIO pins to this benign
2197 * state or we may 1) eat more current, 2) damage some PHYs
2199 writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
2200 writel(0, gp->regs + MIF_BBCLK);
2201 writel(0, gp->regs + MIF_BBDATA);
2202 writel(0, gp->regs + MIF_BBOENAB);
2203 writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
2204 (void) readl(gp->regs + MAC_XIFCFG);
2209 static int gem_do_start(struct net_device *dev)
2211 struct gem *gp = netdev_priv(dev);
2212 unsigned long flags;
2214 spin_lock_irqsave(&gp->lock, flags);
2215 spin_lock(&gp->tx_lock);
2217 /* Enable the cell */
2218 gem_get_cell(gp);
2220 /* Init & setup chip hardware */
2221 gem_reinit_chip(gp);
2223 gp->running = 1;
2225 napi_enable(&gp->napi);
2227 if (gp->lstate == link_up) {
2228 netif_carrier_on(gp->dev);
2229 gem_set_link_modes(gp);
2232 netif_wake_queue(gp->dev);
2234 spin_unlock(&gp->tx_lock);
2235 spin_unlock_irqrestore(&gp->lock, flags);
2237 if (request_irq(gp->pdev->irq, gem_interrupt,
2238 IRQF_SHARED, dev->name, (void *)dev)) {
2239 printk(KERN_ERR "%s: failed to request irq!\n", gp->dev->name);
2241 spin_lock_irqsave(&gp->lock, flags);
2242 spin_lock(&gp->tx_lock);
2244 napi_disable(&gp->napi);
2246 gp->running = 0;
2247 gem_reset(gp);
2248 gem_clean_rings(gp);
2249 gem_put_cell(gp);
2251 spin_unlock(&gp->tx_lock);
2252 spin_unlock_irqrestore(&gp->lock, flags);
2254 return -EAGAIN;
2257 return 0;
2260 static void gem_do_stop(struct net_device *dev, int wol)
2262 struct gem *gp = netdev_priv(dev);
2263 unsigned long flags;
2265 spin_lock_irqsave(&gp->lock, flags);
2266 spin_lock(&gp->tx_lock);
2268 gp->running = 0;
2270 /* Stop netif queue */
2271 netif_stop_queue(dev);
2273 /* Make sure ints are disabled */
2274 gem_disable_ints(gp);
2276 /* We can drop the lock now */
2277 spin_unlock(&gp->tx_lock);
2278 spin_unlock_irqrestore(&gp->lock, flags);
2280 /* Stop DMA; skip the full reset if we are going to sleep with WOL */
2281 gem_stop_dma(gp);
2282 msleep(10);
2283 if (!wol)
2284 gem_reset(gp);
2285 msleep(10);
2287 /* Get rid of rings */
2288 gem_clean_rings(gp);
2290 /* No irq needed anymore */
2291 free_irq(gp->pdev->irq, (void *) dev);
2293 /* The cell isn't needed either if WOL is off */
2294 if (!wol) {
2295 spin_lock_irqsave(&gp->lock, flags);
2296 gem_put_cell(gp);
2297 spin_unlock_irqrestore(&gp->lock, flags);
2301 static void gem_reset_task(struct work_struct *work)
2303 struct gem *gp = container_of(work, struct gem, reset_task);
2305 mutex_lock(&gp->pm_mutex);
2307 if (gp->opened)
2308 napi_disable(&gp->napi);
2310 spin_lock_irq(&gp->lock);
2311 spin_lock(&gp->tx_lock);
2313 if (gp->running) {
2314 netif_stop_queue(gp->dev);
2316 /* Reset the chip & rings */
2317 gem_reinit_chip(gp);
2318 if (gp->lstate == link_up)
2319 gem_set_link_modes(gp);
2320 netif_wake_queue(gp->dev);
2323 gp->reset_task_pending = 0;
2325 spin_unlock(&gp->tx_lock);
2326 spin_unlock_irq(&gp->lock);
2328 if (gp->opened)
2329 napi_enable(&gp->napi);
2331 mutex_unlock(&gp->pm_mutex);
2335 static int gem_open(struct net_device *dev)
2337 struct gem *gp = netdev_priv(dev);
2338 int rc = 0;
2340 mutex_lock(&gp->pm_mutex);
2342 /* We need the cell enabled */
2343 if (!gp->asleep)
2344 rc = gem_do_start(dev);
2345 gp->opened = (rc == 0);
2347 mutex_unlock(&gp->pm_mutex);
2349 return rc;
2352 static int gem_close(struct net_device *dev)
2354 struct gem *gp = netdev_priv(dev);
2356 mutex_lock(&gp->pm_mutex);
2358 napi_disable(&gp->napi);
2360 gp->opened = 0;
2361 if (!gp->asleep)
2362 gem_do_stop(dev, 0);
2364 mutex_unlock(&gp->pm_mutex);
2366 return 0;
2369 #ifdef CONFIG_PM
2370 static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2372 struct net_device *dev = pci_get_drvdata(pdev);
2373 struct gem *gp = netdev_priv(dev);
2374 unsigned long flags;
2376 mutex_lock(&gp->pm_mutex);
2378 printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
2379 dev->name,
2380 (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
2382 /* Keep the cell enabled during the entire operation */
2383 spin_lock_irqsave(&gp->lock, flags);
2384 spin_lock(&gp->tx_lock);
2385 gem_get_cell(gp);
2386 spin_unlock(&gp->tx_lock);
2387 spin_unlock_irqrestore(&gp->lock, flags);
2389 /* If the driver is opened, we stop the MAC */
2390 if (gp->opened) {
2391 napi_disable(&gp->napi);
2393 /* Stop traffic, mark us closed */
2394 netif_device_detach(dev);
2396 /* Switch off MAC, remember WOL setting */
2397 gp->asleep_wol = gp->wake_on_lan;
2398 gem_do_stop(dev, gp->asleep_wol);
2399 } else
2400 gp->asleep_wol = 0;
2402 /* Mark us asleep */
2403 gp->asleep = 1;
2404 wmb();
2406 /* Stop the link timer */
2407 del_timer_sync(&gp->link_timer);
2409 /* Now we release the mutex so as not to block the reset task, which
2410 * may take it too. We are marked asleep, so there will be no
2411 * conflict here
2413 mutex_unlock(&gp->pm_mutex);
2415 /* Wait for a pending reset task to complete */
2416 while (gp->reset_task_pending)
2417 yield();
2418 flush_scheduled_work();
2420 /* Shut the PHY down eventually and setup WOL */
2421 gem_stop_phy(gp, gp->asleep_wol);
2423 /* Make sure bus master is disabled */
2424 pci_disable_device(gp->pdev);
2426 /* Release the cell; no need to take a lock at this point since
2427 * nothing else can happen now
2429 gem_put_cell(gp);
2431 return 0;
2434 static int gem_resume(struct pci_dev *pdev)
2436 struct net_device *dev = pci_get_drvdata(pdev);
2437 struct gem *gp = netdev_priv(dev);
2438 unsigned long flags;
2440 printk(KERN_INFO "%s: resuming\n", dev->name);
2442 mutex_lock(&gp->pm_mutex);
2444 /* Keep the cell enabled during the entire operation, no need to
2445 * take a lock here though since nothing else can happen while we are
2446 * marked asleep
2448 gem_get_cell(gp);
2450 /* Make sure PCI access and bus master are enabled */
2451 if (pci_enable_device(gp->pdev)) {
2452 printk(KERN_ERR "%s: Can't re-enable chip !\n",
2453 dev->name);
2454 /* Put the cell back and forget it for now; the chip will be considered
2455 * still asleep, and a new sleep cycle may bring it back
2457 gem_put_cell(gp);
2458 mutex_unlock(&gp->pm_mutex);
2459 return 0;
2461 pci_set_master(gp->pdev);
2463 /* Reset everything */
2464 gem_reset(gp);
2466 /* Mark us woken up */
2467 gp->asleep = 0;
2468 wmb();
2470 /* Bring the PHY back. Again, lock is useless at this point as
2471 * nothing can be happening until we restart the whole thing
2473 gem_init_phy(gp);
2475 /* If we were opened, bring everything back */
2476 if (gp->opened) {
2477 /* Restart MAC */
2478 gem_do_start(dev);
2480 /* Re-attach net device */
2481 netif_device_attach(dev);
2484 spin_lock_irqsave(&gp->lock, flags);
2485 spin_lock(&gp->tx_lock);
2487 /* If we had WOL enabled, the cell clock was never turned off during
2488 * sleep, so we end up being unbalanced. Fix that here
2490 if (gp->asleep_wol)
2491 gem_put_cell(gp);
2493 /* This function doesn't need to hold the cell; if the driver is open,
2494 * gem_do_start() has taken its own reference.
2496 gem_put_cell(gp);
2498 spin_unlock(&gp->tx_lock);
2499 spin_unlock_irqrestore(&gp->lock, flags);
2501 mutex_unlock(&gp->pm_mutex);
2503 return 0;
2505 #endif /* CONFIG_PM */
2507 static struct net_device_stats *gem_get_stats(struct net_device *dev)
2509 struct gem *gp = netdev_priv(dev);
2510 struct net_device_stats *stats = &gp->net_stats;
2512 spin_lock_irq(&gp->lock);
2513 spin_lock(&gp->tx_lock);
2515 /* I have seen this being called while the PM was in progress,
2516 * so we shield against this
2518 if (gp->running) {
2519 stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
2520 writel(0, gp->regs + MAC_FCSERR);
2522 stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
2523 writel(0, gp->regs + MAC_AERR);
2525 stats->rx_length_errors += readl(gp->regs + MAC_LERR);
2526 writel(0, gp->regs + MAC_LERR);
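2527 /* Excessive collisions count both as tx aborts and as collisions, hence the double read of MAC_ECOLL */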
2528 stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
2529 stats->collisions +=
2530 (readl(gp->regs + MAC_ECOLL) +
2531 readl(gp->regs + MAC_LCOLL));
2532 writel(0, gp->regs + MAC_ECOLL);
2533 writel(0, gp->regs + MAC_LCOLL);
2536 spin_unlock(&gp->tx_lock);
2537 spin_unlock_irq(&gp->lock);
2539 return &gp->net_stats;
2542 static int gem_set_mac_address(struct net_device *dev, void *addr)
2544 struct sockaddr *macaddr = (struct sockaddr *) addr;
2545 struct gem *gp = netdev_priv(dev);
2546 unsigned char *e = &dev->dev_addr[0];
2548 if (!is_valid_ether_addr(macaddr->sa_data))
2549 return -EADDRNOTAVAIL;
2551 if (!netif_running(dev) || !netif_device_present(dev)) {
2552 /* We'll just catch it later when the
2553 * device is brought up or resumed.
2555 memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
2556 return 0;
2559 mutex_lock(&gp->pm_mutex);
2560 memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
2561 if (gp->running) {
2562 writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
2563 writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
2564 writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
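2565 /* The MAC_ADDR registers take the address as three big-endian 16-bit halves */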
2566 mutex_unlock(&gp->pm_mutex);
2568 return 0;
2571 static void gem_set_multicast(struct net_device *dev)
2573 struct gem *gp = netdev_priv(dev);
2574 u32 rxcfg, rxcfg_new;
2575 int limit = 10000;
2578 spin_lock_irq(&gp->lock);
2579 spin_lock(&gp->tx_lock);
2581 if (!gp->running)
2582 goto bail;
2584 netif_stop_queue(dev);
2586 rxcfg = readl(gp->regs + MAC_RXCFG);
2587 rxcfg_new = gem_setup_multicast(gp);
2588 #ifdef STRIP_FCS
2589 rxcfg_new |= MAC_RXCFG_SFCS;
2590 #endif
2591 gp->mac_rx_cfg = rxcfg_new;
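2592 /* Disable the RX MAC and wait (up to 10000 * 10us = 100ms) for it to stop */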
2593 writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
2594 while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
2595 if (!limit--)
2596 break;
2597 udelay(10);
2600 rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
2601 rxcfg |= rxcfg_new;
2603 writel(rxcfg, gp->regs + MAC_RXCFG);
2605 netif_wake_queue(dev);
2607 bail:
2608 spin_unlock(&gp->tx_lock);
2609 spin_unlock_irq(&gp->lock);
2612 /* Jumbo-grams don't seem to work :-( */
2613 #define GEM_MIN_MTU 68
2614 #if 1
2615 #define GEM_MAX_MTU 1500
2616 #else
2617 #define GEM_MAX_MTU 9000
2618 #endif
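2619 /* 68 bytes is the minimum MTU IPv4 requires a link to support */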
2620 static int gem_change_mtu(struct net_device *dev, int new_mtu)
2622 struct gem *gp = netdev_priv(dev);
2624 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
2625 return -EINVAL;
2627 if (!netif_running(dev) || !netif_device_present(dev)) {
2628 /* We'll just catch it later when the
2629 * device is brought up or resumed.
2631 dev->mtu = new_mtu;
2632 return 0;
2635 mutex_lock(&gp->pm_mutex);
2636 spin_lock_irq(&gp->lock);
2637 spin_lock(&gp->tx_lock);
2638 dev->mtu = new_mtu;
2639 if (gp->running) {
2640 gem_reinit_chip(gp);
2641 if (gp->lstate == link_up)
2642 gem_set_link_modes(gp);
2644 spin_unlock(&gp->tx_lock);
2645 spin_unlock_irq(&gp->lock);
2646 mutex_unlock(&gp->pm_mutex);
2648 return 0;
2651 static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2653 struct gem *gp = netdev_priv(dev);
2655 strcpy(info->driver, DRV_NAME);
2656 strcpy(info->version, DRV_VERSION);
2657 strcpy(info->bus_info, pci_name(gp->pdev));
2660 static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2662 struct gem *gp = netdev_priv(dev);
2664 if (gp->phy_type == phy_mii_mdio0 ||
2665 gp->phy_type == phy_mii_mdio1) {
2666 if (gp->phy_mii.def)
2667 cmd->supported = gp->phy_mii.def->features;
2668 else
2669 cmd->supported = (SUPPORTED_10baseT_Half |
2670 SUPPORTED_10baseT_Full);
2672 /* XXX hardcoded stuff for now */
2673 cmd->port = PORT_MII;
2674 cmd->transceiver = XCVR_EXTERNAL;
2675 cmd->phy_address = 0; /* XXX fixed PHYAD */
2677 /* Return current PHY settings */
2678 spin_lock_irq(&gp->lock);
2679 cmd->autoneg = gp->want_autoneg;
2680 cmd->speed = gp->phy_mii.speed;
2681 cmd->duplex = gp->phy_mii.duplex;
2682 cmd->advertising = gp->phy_mii.advertising;
2684 /* If we started with a forced mode, we don't have a default
2685 * advertise set; we need to return something sensible so
2686 * userland can re-enable autoneg properly.
2688 if (cmd->advertising == 0)
2689 cmd->advertising = cmd->supported;
2690 spin_unlock_irq(&gp->lock);
2691 } else { // XXX PCS ?
2692 cmd->supported =
2693 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2694 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2695 SUPPORTED_Autoneg);
2696 cmd->advertising = cmd->supported;
2697 cmd->speed = 0;
2698 cmd->duplex = cmd->port = cmd->phy_address =
2699 cmd->transceiver = cmd->autoneg = 0;
2701 /* serdes usually means a fibre connector, with most settings fixed */
2702 if (gp->phy_type == phy_serdes) {
2703 cmd->port = PORT_FIBRE;
2704 cmd->supported = (SUPPORTED_1000baseT_Half |
2705 SUPPORTED_1000baseT_Full |
2706 SUPPORTED_FIBRE | SUPPORTED_Autoneg |
2707 SUPPORTED_Pause | SUPPORTED_Asym_Pause);
2708 cmd->advertising = cmd->supported;
2709 cmd->transceiver = XCVR_INTERNAL;
2710 if (gp->lstate == link_up)
2711 cmd->speed = SPEED_1000;
2712 cmd->duplex = DUPLEX_FULL;
2713 cmd->autoneg = 1;
2716 cmd->maxtxpkt = cmd->maxrxpkt = 0;
2718 return 0;
2721 static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2723 struct gem *gp = netdev_priv(dev);
2725 /* Verify the settings we care about. */
2726 if (cmd->autoneg != AUTONEG_ENABLE &&
2727 cmd->autoneg != AUTONEG_DISABLE)
2728 return -EINVAL;
2730 if (cmd->autoneg == AUTONEG_ENABLE &&
2731 cmd->advertising == 0)
2732 return -EINVAL;
2734 if (cmd->autoneg == AUTONEG_DISABLE &&
2735 ((cmd->speed != SPEED_1000 &&
2736 cmd->speed != SPEED_100 &&
2737 cmd->speed != SPEED_10) ||
2738 (cmd->duplex != DUPLEX_HALF &&
2739 cmd->duplex != DUPLEX_FULL)))
2740 return -EINVAL;
2742 /* Apply settings and restart link process. */
2743 spin_lock_irq(&gp->lock);
2744 gem_get_cell(gp);
2745 gem_begin_auto_negotiation(gp, cmd);
2746 gem_put_cell(gp);
2747 spin_unlock_irq(&gp->lock);
2749 return 0;
2752 static int gem_nway_reset(struct net_device *dev)
2754 struct gem *gp = netdev_priv(dev);
2756 if (!gp->want_autoneg)
2757 return -EINVAL;
2759 /* Restart link process. */
2760 spin_lock_irq(&gp->lock);
2761 gem_get_cell(gp);
2762 gem_begin_auto_negotiation(gp, NULL);
2763 gem_put_cell(gp);
2764 spin_unlock_irq(&gp->lock);
2766 return 0;
2769 static u32 gem_get_msglevel(struct net_device *dev)
2771 struct gem *gp = netdev_priv(dev);
2772 return gp->msg_enable;
2775 static void gem_set_msglevel(struct net_device *dev, u32 value)
2777 struct gem *gp = netdev_priv(dev);
2778 gp->msg_enable = value;
2782 /* Add more when I understand how to program the chip */
2783 /* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */
2785 #define WOL_SUPPORTED_MASK (WAKE_MAGIC)
2787 static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2789 struct gem *gp = netdev_priv(dev);
2791 /* Add more when I understand how to program the chip */
2792 if (gp->has_wol) {
2793 wol->supported = WOL_SUPPORTED_MASK;
2794 wol->wolopts = gp->wake_on_lan;
2795 } else {
2796 wol->supported = 0;
2797 wol->wolopts = 0;
2801 static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2803 struct gem *gp = netdev_priv(dev);
2805 if (!gp->has_wol)
2806 return -EOPNOTSUPP;
2807 gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
2808 return 0;
2811 static const struct ethtool_ops gem_ethtool_ops = {
2812 .get_drvinfo = gem_get_drvinfo,
2813 .get_link = ethtool_op_get_link,
2814 .get_settings = gem_get_settings,
2815 .set_settings = gem_set_settings,
2816 .nway_reset = gem_nway_reset,
2817 .get_msglevel = gem_get_msglevel,
2818 .set_msglevel = gem_set_msglevel,
2819 .get_wol = gem_get_wol,
2820 .set_wol = gem_set_wol,
2823 static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2825 struct gem *gp = netdev_priv(dev);
2826 struct mii_ioctl_data *data = if_mii(ifr);
2827 int rc = -EOPNOTSUPP;
2828 unsigned long flags;
2830 /* Hold the PM mutex while doing ioctls or we may collide
2831 * with power management.
2833 mutex_lock(&gp->pm_mutex);
2835 spin_lock_irqsave(&gp->lock, flags);
2836 gem_get_cell(gp);
2837 spin_unlock_irqrestore(&gp->lock, flags);
2839 switch (cmd) {
2840 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2841 data->phy_id = gp->mii_phy_addr;
2842 /* Fallthrough... */
2844 case SIOCGMIIREG: /* Read MII PHY register. */
2845 if (!gp->running)
2846 rc = -EAGAIN;
2847 else {
2848 data->val_out = __phy_read(gp, data->phy_id & 0x1f,
2849 data->reg_num & 0x1f);
2850 rc = 0;
2852 break;
2854 case SIOCSMIIREG: /* Write MII PHY register. */
2855 if (!gp->running)
2856 rc = -EAGAIN;
2857 else {
2858 __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
2859 data->val_in);
2860 rc = 0;
2862 break;
2865 spin_lock_irqsave(&gp->lock, flags);
2866 gem_put_cell(gp);
2867 spin_unlock_irqrestore(&gp->lock, flags);
2869 mutex_unlock(&gp->pm_mutex);
2871 return rc;
2874 #if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
2875 /* Fetch MAC address from vital product data of PCI ROM. */
2876 static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
2878 int this_offset;
2880 for (this_offset = 0x20; this_offset < len; this_offset++) {
2881 void __iomem *p = rom_base + this_offset;
2882 int i;
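2883 /* Match what looks like a VPD "NA" (network address) keyword: 0x4e 0x41 is ASCII "NA", 0x06 its 6-byte length */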
2884 if (readb(p + 0) != 0x90 ||
2885 readb(p + 1) != 0x00 ||
2886 readb(p + 2) != 0x09 ||
2887 readb(p + 3) != 0x4e ||
2888 readb(p + 4) != 0x41 ||
2889 readb(p + 5) != 0x06)
2890 continue;
2892 this_offset += 6;
2893 p += 6;
2895 for (i = 0; i < 6; i++)
2896 dev_addr[i] = readb(p + i);
2897 return 1;
2899 return 0;
2902 static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
2904 size_t size;
2905 void __iomem *p = pci_map_rom(pdev, &size);
2907 if (p) {
2908 int found;
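2909 /* A valid expansion ROM starts with the PCI ROM signature 0x55, 0xaa */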
2910 found = readb(p) == 0x55 &&
2911 readb(p + 1) == 0xaa &&
2912 find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
2913 pci_unmap_rom(pdev, p);
2914 if (found)
2915 return;
2918 /* Fall back to the Sun OUI (08:00:20) followed by 3 random bytes. */
2919 dev_addr[0] = 0x08;
2920 dev_addr[1] = 0x00;
2921 dev_addr[2] = 0x20;
2922 get_random_bytes(dev_addr + 3, 3);
2923 return;
2925 #endif /* not Sparc and not PPC */
2927 static int __devinit gem_get_device_address(struct gem *gp)
2929 #if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
2930 struct net_device *dev = gp->dev;
2931 const unsigned char *addr;
2933 addr = of_get_property(gp->of_node, "local-mac-address", NULL);
2934 if (addr == NULL) {
2935 #ifdef CONFIG_SPARC
2936 addr = idprom->id_ethaddr;
2937 #else
2938 printk("\n");
2939 printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
2940 return -1;
2941 #endif
2943 memcpy(dev->dev_addr, addr, 6);
2944 #else
2945 get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
2946 #endif
2947 return 0;
2950 static void gem_remove_one(struct pci_dev *pdev)
2952 struct net_device *dev = pci_get_drvdata(pdev);
2954 if (dev) {
2955 struct gem *gp = netdev_priv(dev);
2957 unregister_netdev(dev);
2959 /* Stop the link timer */
2960 del_timer_sync(&gp->link_timer);
2962 /* We shouldn't need any locking here */
2963 gem_get_cell(gp);
2965 /* Wait for a pending reset task to complete */
2966 while (gp->reset_task_pending)
2967 yield();
2968 flush_scheduled_work();
2970 /* Shut the PHY down */
2971 gem_stop_phy(gp, 0);
2973 gem_put_cell(gp);
2975 /* Make sure bus master is disabled */
2976 pci_disable_device(gp->pdev);
2978 /* Free resources */
2979 pci_free_consistent(pdev,
2980 sizeof(struct gem_init_block),
2981 gp->init_block,
2982 gp->gblock_dvma);
2983 iounmap(gp->regs);
2984 pci_release_regions(pdev);
2985 free_netdev(dev);
2987 pci_set_drvdata(pdev, NULL);
2991 static const struct net_device_ops gem_netdev_ops = {
2992 .ndo_open = gem_open,
2993 .ndo_stop = gem_close,
2994 .ndo_start_xmit = gem_start_xmit,
2995 .ndo_get_stats = gem_get_stats,
2996 .ndo_set_multicast_list = gem_set_multicast,
2997 .ndo_do_ioctl = gem_ioctl,
2998 .ndo_tx_timeout = gem_tx_timeout,
2999 .ndo_change_mtu = gem_change_mtu,
3000 .ndo_validate_addr = eth_validate_addr,
3001 .ndo_set_mac_address = gem_set_mac_address,
3002 #ifdef CONFIG_NET_POLL_CONTROLLER
3003 .ndo_poll_controller = gem_poll_controller,
3004 #endif
3007 static int __devinit gem_init_one(struct pci_dev *pdev,
3008 const struct pci_device_id *ent)
3010 static int gem_version_printed = 0;
3011 unsigned long gemreg_base, gemreg_len;
3012 struct net_device *dev;
3013 struct gem *gp;
3014 int err, pci_using_dac;
3016 if (gem_version_printed++ == 0)
3017 printk(KERN_INFO "%s", version);
3019 /* Apple gmac note: during probe, the chip is powered up by
3020 * the arch code to allow the code below to work (and to let
3021 * the chip be probed via the config space). It won't stay powered
3022 * up until the interface is brought up, however, so we can't rely
3023 * on register configuration done at this point.
3025 err = pci_enable_device(pdev);
3026 if (err) {
3027 printk(KERN_ERR PFX "Cannot enable MMIO operation, "
3028 "aborting.\n");
3029 return err;
3031 pci_set_master(pdev);
3033 /* Configure DMA attributes. */
3035 /* All of the GEM documentation states that 64-bit DMA addressing
3036 * is fully supported and should work just fine. However the
3037 * front end for RIO based GEMs is different and only supports
3038 * 32-bit addressing.
3040 * For now we assume the various PPC GEMs are 32-bit only as well.
3042 if (pdev->vendor == PCI_VENDOR_ID_SUN &&
3043 pdev->device == PCI_DEVICE_ID_SUN_GEM &&
3044 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3045 pci_using_dac = 1;
3046 } else {
3047 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3048 if (err) {
3049 printk(KERN_ERR PFX "No usable DMA configuration, "
3050 "aborting.\n");
3051 goto err_disable_device;
3053 pci_using_dac = 0;
3056 gemreg_base = pci_resource_start(pdev, 0);
3057 gemreg_len = pci_resource_len(pdev, 0);
3059 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3060 printk(KERN_ERR PFX "Cannot find proper PCI device "
3061 "base address, aborting.\n");
3062 err = -ENODEV;
3063 goto err_disable_device;
3066 dev = alloc_etherdev(sizeof(*gp));
3067 if (!dev) {
3068 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
3069 err = -ENOMEM;
3070 goto err_disable_device;
3072 SET_NETDEV_DEV(dev, &pdev->dev);
3074 gp = netdev_priv(dev);
3076 err = pci_request_regions(pdev, DRV_NAME);
3077 if (err) {
3078 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
3079 "aborting.\n");
3080 goto err_out_free_netdev;
3083 gp->pdev = pdev;
3084 dev->base_addr = (long) pdev;
3085 gp->dev = dev;
3087 gp->msg_enable = DEFAULT_MSG;
3089 spin_lock_init(&gp->lock);
3090 spin_lock_init(&gp->tx_lock);
3091 mutex_init(&gp->pm_mutex);
3093 init_timer(&gp->link_timer);
3094 gp->link_timer.function = gem_link_timer;
3095 gp->link_timer.data = (unsigned long) gp;
3097 INIT_WORK(&gp->reset_task, gem_reset_task);
3099 gp->lstate = link_down;
3100 gp->timer_ticks = 0;
3101 netif_carrier_off(dev);
3103 gp->regs = ioremap(gemreg_base, gemreg_len);
3104 if (!gp->regs) {
3105 printk(KERN_ERR PFX "Cannot map device registers, "
3106 "aborting.\n");
3107 err = -EIO;
3108 goto err_out_free_res;
3111 /* On Apple, we want a reference to the Open Firmware device-tree
3112 * node. We use it for clock control.
3114 #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
3115 gp->of_node = pci_device_to_OF_node(pdev);
3116 #endif
3118 /* Only the Apple version supports WOL, as far as we know */
3119 if (pdev->vendor == PCI_VENDOR_ID_APPLE)
3120 gp->has_wol = 1;
3122 /* Make sure cell is enabled */
3123 gem_get_cell(gp);
3125 /* Make sure everything is stopped and in init state */
3126 gem_reset(gp);
3128 /* Fill up the mii_phy structure (even if we won't use it) */
3129 gp->phy_mii.dev = dev;
3130 gp->phy_mii.mdio_read = _phy_read;
3131 gp->phy_mii.mdio_write = _phy_write;
3132 #ifdef CONFIG_PPC_PMAC
3133 gp->phy_mii.platform_data = gp->of_node;
3134 #endif
3135 /* By default, we start with autoneg */
3136 gp->want_autoneg = 1;
3138 /* Check fifo sizes, PHY type, etc... */
3139 if (gem_check_invariants(gp)) {
3140 err = -ENODEV;
3141 goto err_out_iounmap;
3144 /* It is guaranteed that the returned buffer will be at least
3145 * PAGE_SIZE aligned.
3147 gp->init_block = (struct gem_init_block *)
3148 pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
3149 &gp->gblock_dvma);
3150 if (!gp->init_block) {
3151 printk(KERN_ERR PFX "Cannot allocate init block, "
3152 "aborting.\n");
3153 err = -ENOMEM;
3154 goto err_out_iounmap;
3157 if (gem_get_device_address(gp))
3158 goto err_out_free_consistent;
3160 dev->netdev_ops = &gem_netdev_ops;
3161 netif_napi_add(dev, &gp->napi, gem_poll, 64);
3162 dev->ethtool_ops = &gem_ethtool_ops;
3163 dev->watchdog_timeo = 5 * HZ;
3164 dev->irq = pdev->irq;
3165 dev->dma = 0;
3167 /* Set that now, in case PM kicks in now */
3168 pci_set_drvdata(pdev, dev);
3170 /* Detect & init PHY, start autoneg, we release the cell now
3171 * too, it will be managed by whoever needs it
3173 gem_init_phy(gp);
3175 spin_lock_irq(&gp->lock);
3176 gem_put_cell(gp);
3177 spin_unlock_irq(&gp->lock);
3179 /* Register with kernel */
3180 if (register_netdev(dev)) {
3181 printk(KERN_ERR PFX "Cannot register net device, "
3182 "aborting.\n");
3183 err = -ENOMEM;
3184 goto err_out_free_consistent;
3187 printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
3188 dev->name, dev->dev_addr);
3190 if (gp->phy_type == phy_mii_mdio0 ||
3191 gp->phy_type == phy_mii_mdio1)
3192 printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
3193 gp->phy_mii.def ? gp->phy_mii.def->name : "no");
3195 /* GEM can do it all... */
3196 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
3197 if (pci_using_dac)
3198 dev->features |= NETIF_F_HIGHDMA;
3200 return 0;
3202 err_out_free_consistent:
3203 gem_remove_one(pdev);
3204 err_out_iounmap:
3205 gem_put_cell(gp);
3206 iounmap(gp->regs);
3208 err_out_free_res:
3209 pci_release_regions(pdev);
3211 err_out_free_netdev:
3212 free_netdev(dev);
3213 err_disable_device:
3214 pci_disable_device(pdev);
3215 return err;
3220 static struct pci_driver gem_driver = {
3221 .name = GEM_MODULE_NAME,
3222 .id_table = gem_pci_tbl,
3223 .probe = gem_init_one,
3224 .remove = gem_remove_one,
3225 #ifdef CONFIG_PM
3226 .suspend = gem_suspend,
3227 .resume = gem_resume,
3228 #endif /* CONFIG_PM */
3231 static int __init gem_init(void)
3233 return pci_register_driver(&gem_driver);
3236 static void __exit gem_cleanup(void)
3238 pci_unregister_driver(&gem_driver);
3241 module_init(gem_init);
3242 module_exit(gem_cleanup);