Staging: hv: mousevsc: Cleanup and properly implement reportdesc_callback()
[zen-stable.git] / drivers / net / sc92031.c
blob9da47337b7c3ab86d182e2378f58d105f46f2b7c
1 /* Silan SC92031 PCI Fast Ethernet Adapter driver
3 * Based on vendor drivers:
4 * Silan Fast Ethernet Netcard Driver:
5 * MODULE_AUTHOR ("gaoyonghong");
6 * MODULE_DESCRIPTION ("SILAN Fast Ethernet driver");
7 * MODULE_LICENSE("GPL");
8 * 8139D Fast Ethernet driver:
9 * (C) 2002 by gaoyonghong
10 * MODULE_AUTHOR ("gaoyonghong");
11 * MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver");
12 * MODULE_LICENSE("GPL");
13 * Both are almost identical and seem to be based on pci-skeleton.c
15 * Rewritten for 2.6 by Cesar Eduardo Barros
17 * A datasheet for this chip can be found at
18 * http://www.silan.com.cn/english/product/pdf/SC92031AY.pdf
21 /* Note about set_mac_address: I don't know how to change the hardware
22 * matching, so you need to enable IFF_PROMISC when using it.
25 #include <linux/interrupt.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/delay.h>
29 #include <linux/pci.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/ethtool.h>
34 #include <linux/crc32.h>
36 #include <asm/irq.h>
#define SC92031_NAME "sc92031"

/* BAR 0 is MMIO, BAR 1 is PIO */
#ifndef SC92031_USE_BAR
#define SC92031_USE_BAR 0
#endif

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 64;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC(multicast_filter_limit,
	"Maximum number of filtered multicast addresses");

/* Forced media type; 0 selects autonegotiation (see "media options" below). */
static int media;
module_param(media, int, 0);
MODULE_PARM_DESC(media, "Media type (0x00 = autodetect,"
	" 0x01 = 10M half, 0x02 = 10M full,"
	" 0x04 = 100M half, 0x08 = 100M full)");

/* Size of the in-memory receive ring. */
#define RX_BUF_LEN_IDX  3 /* 0==8K, 1==16K, 2==32K, 3==64K ,4==128K*/
#define RX_BUF_LEN	(8192 << RX_BUF_LEN_IDX)

/* Number of Tx descriptor registers. */
#define NUM_TX_DESC	4

/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
#define MAX_ETH_FRAME_SIZE	1536

/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
#define TX_BUF_SIZE	MAX_ETH_FRAME_SIZE
#define TX_BUF_TOT_LEN	(TX_BUF_SIZE * NUM_TX_DESC)

/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH	7	/* Rx buffer level before first PCI xfer. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(4*HZ)

#define SILAN_STATS_NUM	2	/* number of ETHTOOL_GSTATS */

/* media options */
#define AUTOSELECT	0x00
#define M10_HALF	0x01
#define M10_FULL	0x02
#define M100_HALF	0x04
#define M100_FULL	0x08
/* Symbolic offsets to registers. */
enum silan_registers {
	Config0 = 0x00,		// Config0
	Config1 = 0x04,		// Config1
	RxBufWPtr = 0x08,	// Rx buffer write pointer
	IntrStatus = 0x0C,	// Interrupt status
	IntrMask = 0x10,	// Interrupt mask
	RxbufAddr = 0x14,	// Rx buffer start address
	RxBufRPtr = 0x18,	// Rx buffer read pointer
	Txstatusall = 0x1C,	// Transmit status of all descriptors
	TxStatus0 = 0x20,	// Transmit status (Four 32bit registers).
	TxAddr0 = 0x30,		// Tx descriptors (also four 32bit).
	RxConfig = 0x40,	// Rx configuration
	MAC0 = 0x44,		// Ethernet hardware address.
	MAR0 = 0x4C,		// Multicast filter.
	RxStatus0 = 0x54,	// Rx status
	TxConfig = 0x5C,	// Tx configuration
	PhyCtrl = 0x60,		// physical control
	FlowCtrlConfig = 0x64,	// flow control
	Miicmd0 = 0x68,		// Mii command0 register
	Miicmd1 = 0x6C,		// Mii command1 register
	Miistatus = 0x70,	// Mii status register
	Timercnt = 0x74,	// Timer counter register
	TimerIntr = 0x78,	// Timer interrupt register
	PMConfig = 0x7C,	// Power Manager configuration
	CRC0 = 0x80,		// Power Manager CRC (Two 32bit registers)
	Wakeup0 = 0x88,		// Power Manager wakeup (Eight 64bit registers)
	LSBCRC0 = 0xC8,		// Power Manager LSBCRC (Two 32bit registers)
	TestD0 = 0xD0,
	TestD4 = 0xD4,
	TestD8 = 0xD8,
};
/* MII/PHY register numbers and bit values used by this driver.
 * Registers 16 and 24 are Silan PHY vendor-specific. */
#define MII_BMCR		0	// Basic mode control register
#define MII_BMSR		1	// Basic mode status register
#define MII_JAB			16
#define MII_OutputStatus	24

#define BMCR_FULLDPLX		0x0100	// Full duplex
#define BMCR_ANRESTART		0x0200	// Auto negotiation restart
#define BMCR_ANENABLE		0x1000	// Enable auto negotiation
#define BMCR_SPEED100		0x2000	// Select 100Mbps
#define BMSR_LSTATUS		0x0004	// Link status
#define PHY_16_JAB_ENB		0x1000
#define PHY_16_PORT_ENB		0x1
/* Bits of the IntrStatus/IntrMask registers. */
enum IntrStatusBits {
	LinkFail = 0x80000000,
	LinkOK = 0x40000000,
	TimeOut = 0x20000000,
	RxOverflow = 0x0040,
	RxOK = 0x0020,
	TxOK = 0x0001,
	/* All interrupt sources this driver handles. */
	IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK,
};

/* Bits of the per-descriptor TxStatus0..3 registers. */
enum TxStatusBits {
	TxCarrierLost = 0x20000000,
	TxAborted = 0x10000000,
	TxOutOfWindow = 0x08000000,
	TxNccShift = 22,		/* collision count field */
	EarlyTxThresShift = 16,
	TxStatOK = 0x8000,
	TxUnderrun = 0x4000,
	TxOwn = 0x2000,
};

/* Bits of the per-packet rx status word in the rx ring. */
enum RxStatusBits {
	RxStatesOK = 0x80000,
	RxBadAlign = 0x40000,
	RxHugeFrame = 0x20000,
	RxSmallFrame = 0x10000,
	RxCRCOK = 0x8000,
	RxCrlFrame = 0x4000,
	Rx_Broadcast = 0x2000,
	Rx_Multicast = 0x1000,
	RxAddrMatch = 0x0800,
	MiiErr = 0x0400,
};

/* Bits of the RxConfig register. */
enum RxConfigBits {
	RxFullDx = 0x80000000,
	RxEnb = 0x40000000,
	RxSmall = 0x20000000,
	RxHuge = 0x10000000,
	RxErr = 0x08000000,
	RxAllphys = 0x04000000,
	RxMulticast = 0x02000000,
	RxBroadcast = 0x01000000,
	RxLoopBack = (1 << 23) | (1 << 22),
	LowThresholdShift = 12,
	HighThresholdShift = 2,
};

/* Bits of the TxConfig register. */
enum TxConfigBits {
	TxFullDx = 0x80000000,
	TxEnb = 0x40000000,
	TxEnbPad = 0x20000000,
	TxEnbHuge = 0x10000000,
	TxEnbFCS = 0x08000000,
	TxNoBackOff = 0x04000000,
	TxEnbPrem = 0x02000000,
	TxCareLostCrs = 0x1000000,
	TxExdCollNum = 0xf00000,
	TxDataRate = 0x80000,
};

/* Bits of the PhyCtrl register. */
enum PhyCtrlconfigbits {
	PhyCtrlAne = 0x80000000,
	PhyCtrlSpd100 = 0x40000000,
	PhyCtrlSpd10 = 0x20000000,
	PhyCtrlPhyBaseAddr = 0x1f000000,
	PhyCtrlDux = 0x800000,
	PhyCtrlReset = 0x400000,
};

/* Bits of the FlowCtrlConfig register. */
enum FlowCtrlConfigBits {
	FlowCtrlFullDX = 0x80000000,
	FlowCtrlEnb = 0x40000000,
};

/* Bits of the Config0 register. */
enum Config0Bits {
	Cfg0_Reset = 0x80000000,
	Cfg0_Anaoff = 0x40000000,
	Cfg0_LDPS = 0x20000000,
};

/* Bits of the Config1 register. */
enum Config1Bits {
	Cfg1_EarlyRx = 1 << 31,
	Cfg1_EarlyTx = 1 << 30,

	//rx buffer size
	Cfg1_Rcv8K = 0x0,
	Cfg1_Rcv16K = 0x1,
	Cfg1_Rcv32K = 0x3,
	Cfg1_Rcv64K = 0x7,
	Cfg1_Rcv128K = 0xf,
};

/* Bits of the Miicmd0 register. */
enum MiiCmd0Bits {
	Mii_Divider = 0x20000000,
	Mii_WRITE = 0x400000,
	Mii_READ = 0x200000,
	Mii_SCAN = 0x100000,
	Mii_Tamod = 0x80000,
	Mii_Drvmod = 0x40000,
	Mii_mdc = 0x20000,
	Mii_mdoen = 0x10000,
	Mii_mdo = 0x8000,
	Mii_mdi = 0x4000,
};

/* Bits of the Miistatus register. */
enum MiiStatusBits {
	Mii_StatusBusy = 0x80000000,
};

/* Bits of the PMConfig register. */
enum PMConfigBits {
	PM_Enable = 1 << 31,
	PM_LongWF = 1 << 30,
	PM_Magic = 1 << 29,
	PM_LANWake = 1 << 28,
	PM_LWPTN = (1 << 27 | 1 << 26),
	PM_LinkUp = 1 << 25,
	PM_WakeUp = 1 << 24,
};
/* Locking rules:
 * priv->lock protects most of the fields of priv and most of the
 * hardware registers. It does not have to protect against softirqs
 * between sc92031_disable_interrupts and sc92031_enable_interrupts;
 * it also does not need to be used in ->open and ->stop while the
 * device interrupts are off.
 * Not having to protect against softirqs is very useful due to heavy
 * use of mdelay() at _sc92031_reset.
 * Functions prefixed with _sc92031_ must be called with the lock held;
 * functions prefixed with sc92031_ must be called without the lock held.
 * Use mmiowb() before unlocking if the hardware was written to.
 */

/* Locking rules for the interrupt:
 * - the interrupt and the tasklet never run at the same time
 * - neither run between sc92031_disable_interrupts and
 *   sc92031_enable_interrupt
 */

/* Per-device private state, stored in netdev_priv(dev). */
struct sc92031_priv {
	spinlock_t		lock;
	/* iomap.h cookie */
	void __iomem		*port_base;
	/* pci device structure */
	struct pci_dev		*pdev;
	/* tasklet */
	struct tasklet_struct	tasklet;

	/* CPU address of rx ring */
	void			*rx_ring;
	/* PCI address of rx ring */
	dma_addr_t		rx_ring_dma_addr;
	/* PCI address of rx ring read pointer */
	dma_addr_t		rx_ring_tail;

	/* tx ring write index */
	unsigned		tx_head;
	/* tx ring read index */
	unsigned		tx_tail;
	/* CPU address of tx bounce buffer */
	void			*tx_bufs;
	/* PCI address of tx bounce buffer */
	dma_addr_t		tx_bufs_dma_addr;

	/* copies of some hardware registers */
	u32			intr_status;
	/* atomic so the irq handler/tasklet can read it without the lock */
	atomic_t		intr_mask;
	u32			rx_config;
	u32			tx_config;
	u32			pm_config;

	/* copy of some flags from dev->flags */
	unsigned int		mc_flags;

	/* for ETHTOOL_GSTATS */
	u64			tx_timeouts;
	u64			rx_loss;

	/* for dev->get_stats */
	long			rx_value;
};
/* I don't know which registers can be safely read; however, I can guess
 * MAC0 is one of them.
 * The read is used only for its side effect of flushing posted PCI
 * writes; the value is discarded. */
static inline void _sc92031_dummy_read(void __iomem *port_base)
{
	ioread32(port_base + MAC0);
}
/* Busy-wait (10us steps) until the MII interface finishes its current
 * transaction; returns the final Miistatus value with Mii_StatusBusy
 * clear.  Must be called with priv->lock held. */
static u32 _sc92031_mii_wait(void __iomem *port_base)
{
	u32 mii_status;

	do {
		udelay(10);
		mii_status = ioread32(port_base + Miistatus);
	} while (mii_status & Mii_StatusBusy);

	return mii_status;
}
/* Issue one MII command: wait for the interface to go idle, program the
 * operand (cmd1) and the opcode (cmd0, e.g. Mii_READ/Mii_WRITE/Mii_SCAN),
 * then wait for completion and return the resulting Miistatus value. */
static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1)
{
	iowrite32(Mii_Divider, port_base + Miicmd0);

	_sc92031_mii_wait(port_base);

	iowrite32(cmd1, port_base + Miicmd1);
	iowrite32(Mii_Divider | cmd0, port_base + Miicmd0);

	return _sc92031_mii_wait(port_base);
}
/* Restart hardware link-status scanning on PHY register 1. */
static void _sc92031_mii_scan(void __iomem *port_base)
{
	_sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6);
}
/* Read a 16-bit PHY register; the data is returned in the upper bits of
 * the Miistatus word, hence the >> 13. */
static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg)
{
	return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13;
}
/* Write a 16-bit value to a PHY register (register number at bit 6,
 * data at bit 11 of the Miicmd1 operand). */
static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val)
{
	_sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11));
}
/* Mask all device interrupts and wait until neither the irq handler nor
 * the tasklet can run anymore.  Call without priv->lock held; pairs with
 * sc92031_enable_interrupts. */
static void sc92031_disable_interrupts(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* tell the tasklet/interrupt not to enable interrupts */
	atomic_set(&priv->intr_mask, 0);
	wmb();

	/* stop interrupts */
	iowrite32(0, port_base + IntrMask);
	_sc92031_dummy_read(port_base);
	mmiowb();

	/* wait for any concurrent interrupt/tasklet to finish */
	synchronize_irq(dev->irq);
	tasklet_disable(&priv->tasklet);
}
/* Re-enable the tasklet and unmask all handled interrupt sources.
 * Call without priv->lock held; pairs with sc92031_disable_interrupts. */
static void sc92031_enable_interrupts(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	tasklet_enable(&priv->tasklet);

	/* publish the soft mask before unmasking in hardware */
	atomic_set(&priv->intr_mask, IntrBits);
	wmb();

	iowrite32(IntrBits, port_base + IntrMask);
	mmiowb();
}
393 static void _sc92031_disable_tx_rx(struct net_device *dev)
395 struct sc92031_priv *priv = netdev_priv(dev);
396 void __iomem *port_base = priv->port_base;
398 priv->rx_config &= ~RxEnb;
399 priv->tx_config &= ~TxEnb;
400 iowrite32(priv->rx_config, port_base + RxConfig);
401 iowrite32(priv->tx_config, port_base + TxConfig);
404 static void _sc92031_enable_tx_rx(struct net_device *dev)
406 struct sc92031_priv *priv = netdev_priv(dev);
407 void __iomem *port_base = priv->port_base;
409 priv->rx_config |= RxEnb;
410 priv->tx_config |= TxEnb;
411 iowrite32(priv->rx_config, port_base + RxConfig);
412 iowrite32(priv->tx_config, port_base + TxConfig);
415 static void _sc92031_tx_clear(struct net_device *dev)
417 struct sc92031_priv *priv = netdev_priv(dev);
419 while (priv->tx_head - priv->tx_tail > 0) {
420 priv->tx_tail++;
421 dev->stats.tx_dropped++;
423 priv->tx_head = priv->tx_tail = 0;
/* Program the 64-bit multicast hash filter (MAR0/MAR1) from the device's
 * multicast list.  Promiscuous/allmulti mode, or more addresses than
 * multicast_filter_limit, opens the filter completely.  Caller holds
 * priv->lock. */
static void _sc92031_set_mar(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 mar0 = 0, mar1 = 0;

	if ((dev->flags & IFF_PROMISC) ||
	    netdev_mc_count(dev) > multicast_filter_limit ||
	    (dev->flags & IFF_ALLMULTI))
		mar0 = mar1 = 0xffffffff;
	else if (dev->flags & IFF_MULTICAST) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, dev) {
			u32 crc;
			unsigned bit = 0;

			/* hash = bit-reversed top 6 bits of inverted CRC */
			crc = ~ether_crc(ETH_ALEN, ha->addr);
			crc >>= 24;

			if (crc & 0x01)	bit |= 0x02;
			if (crc & 0x02)	bit |= 0x01;
			if (crc & 0x10)	bit |= 0x20;
			if (crc & 0x20)	bit |= 0x10;
			if (crc & 0x40)	bit |= 0x08;
			if (crc & 0x80)	bit |= 0x04;

			if (bit > 31)
				mar0 |= 0x1 << (bit - 32);
			else
				mar1 |= 0x1 << bit;
		}
	}

	iowrite32(mar0, port_base + MAR0);
	iowrite32(mar1, port_base + MAR0 + 4);
}
/* Recompute the cached RxConfig filter bits from dev->flags.  The
 * register itself is only rewritten when the relevant flags actually
 * changed and the link is up.  Caller holds priv->lock. */
static void _sc92031_set_rx_config(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	unsigned int old_mc_flags;
	u32 rx_config_bits = 0;

	old_mc_flags = priv->mc_flags;

	if (dev->flags & IFF_PROMISC)
		rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast
			| RxMulticast | RxAllphys;

	if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
		rx_config_bits |= RxMulticast;

	if (dev->flags & IFF_BROADCAST)
		rx_config_bits |= RxBroadcast;

	priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast
			| RxMulticast | RxAllphys);
	priv->rx_config |= rx_config_bits;

	priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI
			| IFF_MULTICAST | IFF_BROADCAST);

	if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags)
		iowrite32(priv->rx_config, port_base + RxConfig);
}
/* Poll the PHY for link state.  On link-up: read the negotiated
 * speed/duplex from the vendor-specific OutputStatus register, rebuild
 * the Rx/Tx/flow-control configuration, enable Tx/Rx and report carrier
 * on.  On link-down: disable Tx/Rx and report carrier off.  Returns true
 * iff the link is up.  Caller holds priv->lock. */
static bool _sc92031_check_media(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u16 bmsr;

	bmsr = _sc92031_mii_read(port_base, MII_BMSR);
	rmb();
	if (bmsr & BMSR_LSTATUS) {
		bool speed_100, duplex_full;
		u32 flow_ctrl_config = 0;
		u16 output_status = _sc92031_mii_read(port_base,
				MII_OutputStatus);
		_sc92031_mii_scan(port_base);

		speed_100 = output_status & 0x2;
		duplex_full = output_status & 0x4;

		/* Initial Tx/Rx configuration */
		priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
		priv->tx_config = 0x48800000;

		/* NOTE: vendor driver had dead code here to enable tx padding */

		if (!speed_100)
			priv->tx_config |= 0x80000;

		// configure rx mode
		_sc92031_set_rx_config(dev);

		if (duplex_full) {
			priv->rx_config |= RxFullDx;
			priv->tx_config |= TxFullDx;
			flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
		} else {
			priv->rx_config &= ~RxFullDx;
			priv->tx_config &= ~TxFullDx;
		}

		_sc92031_set_mar(dev);
		_sc92031_set_rx_config(dev);
		_sc92031_enable_tx_rx(dev);
		iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);

		netif_carrier_on(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
				dev->name,
				speed_100 ? "100" : "10",
				duplex_full ? "full" : "half");
		return true;
	} else {
		_sc92031_mii_scan(port_base);

		netif_carrier_off(dev);

		_sc92031_disable_tx_rx(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link down\n", dev->name);
		return false;
	}
}
/* Reset the PHY and program its advertised speed/duplex according to the
 * "media" module parameter, then re-enable the port and link scanning.
 * Leaves carrier off and the Tx queue stopped until the next link check.
 * Caller holds priv->lock (uses mdelay, see locking rules above). */
static void _sc92031_phy_reset(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 phy_ctrl;

	phy_ctrl = ioread32(port_base + PhyCtrl);
	phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
	phy_ctrl |= PhyCtrlAne | PhyCtrlReset;

	switch (media) {
	default:
	case AUTOSELECT:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
		break;
	case M10_HALF:
		phy_ctrl |= PhyCtrlSpd10;
		break;
	case M10_FULL:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
		break;
	case M100_HALF:
		phy_ctrl |= PhyCtrlSpd100;
		break;
	case M100_FULL:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
		break;
	}

	iowrite32(phy_ctrl, port_base + PhyCtrl);
	mdelay(10);

	phy_ctrl &= ~PhyCtrlReset;
	iowrite32(phy_ctrl, port_base + PhyCtrl);
	mdelay(1);

	_sc92031_mii_write(port_base, MII_JAB,
			PHY_16_JAB_ENB | PHY_16_PORT_ENB);
	_sc92031_mii_scan(port_base);

	netif_carrier_off(dev);
	netif_stop_queue(dev);
}
/* Full chip reset: soft-reset via Config0, clear the multicast filter,
 * reinitialize the rx/tx rings and the cached register copies, reset the
 * PHY and redo media detection.  Caller holds priv->lock (heavy mdelay
 * use, see locking rules above). */
static void _sc92031_reset(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* disable PM */
	iowrite32(0, port_base + PMConfig);

	/* soft reset the chip */
	iowrite32(Cfg0_Reset, port_base + Config0);
	mdelay(200);

	iowrite32(0, port_base + Config0);
	mdelay(10);

	/* disable interrupts */
	iowrite32(0, port_base + IntrMask);

	/* clear multicast address */
	iowrite32(0, port_base + MAR0);
	iowrite32(0, port_base + MAR0 + 4);

	/* init rx ring */
	iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
	priv->rx_ring_tail = priv->rx_ring_dma_addr;

	/* init tx ring */
	_sc92031_tx_clear(dev);

	/* clear old register values */
	priv->intr_status = 0;
	atomic_set(&priv->intr_mask, 0);
	priv->rx_config = 0;
	priv->tx_config = 0;
	priv->mc_flags = 0;

	/* configure rx buffer size */
	/* NOTE: vendor driver had dead code here to enable early tx/rx */
	iowrite32(Cfg1_Rcv64K, port_base + Config1);

	_sc92031_phy_reset(dev);
	_sc92031_check_media(dev);

	/* calculate rx fifo overflow */
	priv->rx_value = 0;

	/* enable PM */
	iowrite32(priv->pm_config, port_base + PMConfig);

	/* clear intr register */
	ioread32(port_base + IntrStatus);
}
/* Reap completed Tx descriptors: walk from tx_tail towards tx_head,
 * stop at the first descriptor the hardware has not finished, update
 * statistics per descriptor, and wake the queue if space was freed.
 * Runs from the tasklet with priv->lock held. */
static void _sc92031_tx_tasklet(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	unsigned old_tx_tail;
	unsigned entry;
	u32 tx_status;

	old_tx_tail = priv->tx_tail;
	while (priv->tx_head - priv->tx_tail > 0) {
		entry = priv->tx_tail % NUM_TX_DESC;
		tx_status = ioread32(port_base + TxStatus0 + entry * 4);

		/* not done yet -> stop reaping here */
		if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
			break;

		priv->tx_tail++;

		if (tx_status & TxStatOK) {
			dev->stats.tx_bytes += tx_status & 0x1fff;
			dev->stats.tx_packets++;
			/* Note: TxCarrierLost is always asserted at 100mbps. */
			dev->stats.collisions += (tx_status >> 22) & 0xf;
		}

		if (tx_status & (TxOutOfWindow | TxAborted)) {
			dev->stats.tx_errors++;

			if (tx_status & TxAborted)
				dev->stats.tx_aborted_errors++;

			if (tx_status & TxCarrierLost)
				dev->stats.tx_carrier_errors++;

			if (tx_status & TxOutOfWindow)
				dev->stats.tx_window_errors++;
		}

		if (tx_status & TxUnderrun)
			dev->stats.tx_fifo_errors++;
	}

	if (priv->tx_tail != old_tx_tail)
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
}
/* Account a bad rx packet in the error statistics, classifying it by
 * the status bits; a packet that looks otherwise fine is counted as
 * rx_loss instead.  Called from _sc92031_rx_tasklet with priv->lock
 * held. */
static void _sc92031_rx_tasklet_error(struct net_device *dev,
				      u32 rx_status, unsigned rx_size)
{
	if (rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (!(rx_status & RxStatesOK)) {
		dev->stats.rx_errors++;

		if (rx_status & (RxHugeFrame | RxSmallFrame))
			dev->stats.rx_length_errors++;

		if (rx_status & RxBadAlign)
			dev->stats.rx_frame_errors++;

		if (!(rx_status & RxCRCOK))
			dev->stats.rx_crc_errors++;
	} else {
		struct sc92031_priv *priv = netdev_priv(dev);
		priv->rx_loss++;
	}
}
/* Drain the circular rx DMA buffer: for each packet, read its 32-bit
 * status/length word, copy the payload (possibly wrapping around the end
 * of the ring) into a fresh skb and hand it to the stack, then advance
 * the hardware read pointer.  Runs from the tasklet with priv->lock
 * held. */
static void _sc92031_rx_tasklet(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	dma_addr_t rx_ring_head;
	unsigned rx_len;
	unsigned rx_ring_offset;
	void *rx_ring = priv->rx_ring;

	rx_ring_head = ioread32(port_base + RxBufWPtr);
	rmb();

	/* rx_ring_head is only 17 bits in the RxBufWPtr register.
	 * we need to change it to 32 bits physical address
	 */
	rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
	rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);
	if (rx_ring_head < priv->rx_ring_dma_addr)
		rx_ring_head += RX_BUF_LEN;

	/* bytes available, accounting for ring wrap-around */
	if (rx_ring_head >= priv->rx_ring_tail)
		rx_len = rx_ring_head - priv->rx_ring_tail;
	else
		rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);

	if (!rx_len)
		return;

	if (unlikely(rx_len > RX_BUF_LEN)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s: rx packets length > rx buffer\n",
				dev->name);
		return;
	}

	rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;

	while (rx_len) {
		u32 rx_status;
		unsigned rx_size, rx_size_align, pkt_size;
		struct sk_buff *skb;

		rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
		rmb();

		rx_size = rx_status >> 20;
		rx_size_align = (rx_size + 3) & ~3;	// for 4 bytes aligned
		pkt_size = rx_size - 4;	// Omit the four octet CRC from the length.

		rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;

		if (unlikely(rx_status == 0 ||
				rx_size > (MAX_ETH_FRAME_SIZE + 4) ||
				rx_size < 16 ||
				!(rx_status & RxStatesOK))) {
			_sc92031_rx_tasklet_error(dev, rx_status, rx_size);
			break;
		}

		if (unlikely(rx_size_align + 4 > rx_len)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
			break;
		}

		rx_len -= rx_size_align + 4;

		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
		if (unlikely(!skb)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
						dev->name, pkt_size);
			goto next;
		}

		/* packet may wrap around the end of the ring buffer */
		if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
			memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
				rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
			memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
				rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
		} else {
			memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
		}

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		dev->stats.rx_bytes += pkt_size;
		dev->stats.rx_packets++;

		if (rx_status & Rx_Multicast)
			dev->stats.multicast++;

	next:
		rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
	}
	mb();

	priv->rx_ring_tail = rx_ring_head;
	iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
}
832 static void _sc92031_link_tasklet(struct net_device *dev)
834 if (_sc92031_check_media(dev))
835 netif_wake_queue(dev);
836 else {
837 netif_stop_queue(dev);
838 dev->stats.tx_carrier_errors++;
/* Bottom half: dispatch the interrupt status latched by
 * sc92031_interrupt to the tx/rx/link handlers, then restore the
 * interrupt mask (the irq handler masked everything before scheduling
 * us). */
static void sc92031_tasklet(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	intr_status = priv->intr_status;

	spin_lock(&priv->lock);

	if (unlikely(!netif_running(dev)))
		goto out;

	if (intr_status & TxOK)
		_sc92031_tx_tasklet(dev);

	if (intr_status & RxOK)
		_sc92031_rx_tasklet(dev);

	if (intr_status & RxOverflow)
		dev->stats.rx_errors++;

	if (intr_status & TimeOut) {
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (intr_status & (LinkFail | LinkOK))
		_sc92031_link_tasklet(dev);

out:
	/* re-enable interrupts (intr_mask is 0 if we are being disabled) */
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	spin_unlock(&priv->lock);
}
/* Hard irq handler: mask interrupts, latch and clear IntrStatus (the
 * read clears it), and defer all real work to the tasklet.  Interrupts
 * stay masked until the tasklet rewrites IntrMask. */
static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	/* mask interrupts before clearing IntrStatus */
	iowrite32(0, port_base + IntrMask);
	_sc92031_dummy_read(port_base);

	intr_status = ioread32(port_base + IntrStatus);
	if (unlikely(intr_status == 0xffffffff))
		return IRQ_NONE;	// hardware has gone missing

	intr_status &= IntrBits;
	if (!intr_status)
		goto out_none;

	priv->intr_status = intr_status;
	tasklet_schedule(&priv->tasklet);

	return IRQ_HANDLED;

out_none:
	/* not ours: restore the interrupt mask ourselves */
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	return IRQ_NONE;
}
/* ->ndo_get_stats: fold the hardware rx fifo error counter (upper half
 * of RxStatus0) into dev->stats, accumulating across counter wrap in
 * priv->rx_value. */
static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	// FIXME I do not understand what is this trying to do.
	if (netif_running(dev)) {
		int temp;

		spin_lock_bh(&priv->lock);

		/* Update the error count. */
		temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;

		if (temp == 0xffff) {
			/* counter saturated: accumulate and keep counting */
			priv->rx_value += temp;
			dev->stats.rx_fifo_errors = priv->rx_value;
		} else
			dev->stats.rx_fifo_errors = temp + priv->rx_value;

		spin_unlock_bh(&priv->lock);
	}

	return &dev->stats;
}
/* ->ndo_start_xmit: copy the skb into the next Tx bounce buffer, pad
 * short frames to ETH_ZLEN, program the descriptor address and a status
 * word whose upper bits select the early-tx threshold by frame length,
 * and stop the queue when all NUM_TX_DESC descriptors are in flight.
 * The skb is always consumed. */
static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	unsigned len;
	unsigned entry;
	u32 tx_status;

	if (unlikely(skb->len > TX_BUF_SIZE)) {
		dev->stats.tx_dropped++;
		goto out;
	}

	spin_lock(&priv->lock);

	if (unlikely(!netif_carrier_ok(dev))) {
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);

	entry = priv->tx_head++ % NUM_TX_DESC;

	skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);

	len = skb->len;
	if (len < ETH_ZLEN) {
		/* zero-pad short frames to the minimum ethernet length */
		memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
				0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}

	wmb();

	/* select early-tx threshold by frame length */
	if (len < 100)
		tx_status = len;
	else if (len < 300)
		tx_status = 0x30000 | len;
	else
		tx_status = 0x50000 | len;

	iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
			port_base + TxAddr0 + entry * 4);
	iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
	mmiowb();

	if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock(&priv->lock);

out:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/* ->ndo_open: allocate the rx ring and tx bounce buffers (coherent DMA),
 * request the (shared) irq, reset the chip and enable interrupts.
 * Returns 0 or a negative errno; resources are unwound on failure via
 * the goto chain. */
static int sc92031_open(struct net_device *dev)
{
	int err;
	struct sc92031_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;

	priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
			&priv->rx_ring_dma_addr);
	if (unlikely(!priv->rx_ring)) {
		err = -ENOMEM;
		goto out_alloc_rx_ring;
	}

	priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
			&priv->tx_bufs_dma_addr);
	if (unlikely(!priv->tx_bufs)) {
		err = -ENOMEM;
		goto out_alloc_tx_bufs;
	}
	priv->tx_head = priv->tx_tail = 0;

	err = request_irq(pdev->irq, sc92031_interrupt,
			IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0))
		goto out_request_irq;

	priv->pm_config = 0;

	/* Interrupts already disabled by sc92031_stop or sc92031_probe */
	spin_lock_bh(&priv->lock);

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
	sc92031_enable_interrupts(dev);

	if (netif_carrier_ok(dev))
		netif_start_queue(dev);
	else
		netif_tx_disable(dev);

	return 0;

out_request_irq:
	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
			priv->tx_bufs_dma_addr);
out_alloc_tx_bufs:
	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
			priv->rx_ring_dma_addr);
out_alloc_rx_ring:
	return err;
}
/* ->ndo_stop: quiesce the queue, disable interrupts and Tx/Rx, drop any
 * pending tx descriptors, then release the irq and the DMA buffers
 * allocated in sc92031_open. */
static int sc92031_stop(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;

	netif_tx_disable(dev);

	/* Disable interrupts, stop Tx and Rx. */
	sc92031_disable_interrupts(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_disable_tx_rx(dev);
	_sc92031_tx_clear(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	free_irq(pdev->irq, dev);
	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
			priv->tx_bufs_dma_addr);
	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
			priv->rx_ring_dma_addr);

	return 0;
}
/* ->ndo_set_rx_mode: reprogram the multicast hash filter and the rx
 * filtering configuration under the device lock. */
static void sc92031_set_multicast_list(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_set_mar(dev);
	_sc92031_set_rx_config(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
}
/* ->ndo_tx_timeout: the transmitter is considered hung; count it, fully
 * reset the chip with interrupts disabled, then resume if the link came
 * back up. */
static void sc92031_tx_timeout(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	/* Disable interrupts by clearing the interrupt mask.*/
	sc92031_disable_interrupts(dev);

	spin_lock(&priv->lock);

	priv->tx_timeouts++;

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock(&priv->lock);

	/* enable interrupts */
	sc92031_enable_interrupts(dev);

	if (netif_carrier_ok(dev))
		netif_wake_queue(dev);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ->ndo_poll_controller (netconsole): run the irq handler by hand with
 * the irq line disabled, and the tasklet body inline if the handler
 * latched work. */
static void sc92031_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
		sc92031_tasklet((unsigned long)dev);
	enable_irq(dev->irq);
}
#endif
/* ethtool ->get_settings: report the supported/advertised modes derived
 * from the PhyCtrl register, and the current speed/duplex read from the
 * vendor-specific PHY OutputStatus register. */
static int sc92031_ethtool_get_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u8 phy_address;
	u32 phy_ctrl;
	u16 output_status;

	spin_lock_bh(&priv->lock);

	phy_address = ioread32(port_base + Miicmd1) >> 27;
	phy_ctrl = ioread32(port_base + PhyCtrl);

	output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
	_sc92031_mii_scan(port_base);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
			| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
			| SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;

	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;

	/* all three mode bits set means autonegotiation over all modes */
	if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
			== (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
		cmd->advertising |= ADVERTISED_Autoneg;

	if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
		cmd->advertising |= ADVERTISED_10baseT_Half;

	if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
			== (PhyCtrlSpd10 | PhyCtrlDux))
		cmd->advertising |= ADVERTISED_10baseT_Full;

	if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
		cmd->advertising |= ADVERTISED_100baseT_Half;

	if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
			== (PhyCtrlSpd100 | PhyCtrlDux))
		cmd->advertising |= ADVERTISED_100baseT_Full;

	if (phy_ctrl & PhyCtrlAne)
		cmd->advertising |= ADVERTISED_Autoneg;

	ethtool_cmd_speed_set(cmd,
			      (output_status & 0x2) ? SPEED_100 : SPEED_10);
	cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = phy_address;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}
/* ethtool ->set_settings: validate the request (only MII port, internal
 * transceiver, PHY address 0x1f are accepted), translate the advertised
 * modes or forced speed/duplex into PhyCtrl bits, and rewrite PhyCtrl
 * only if it changed. */
static int sc92031_ethtool_set_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 speed = ethtool_cmd_speed(cmd);
	u32 phy_ctrl;
	u32 old_phy_ctrl;

	if (!(speed == SPEED_10 || speed == SPEED_100))
		return -EINVAL;
	if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
		return -EINVAL;
	if (!(cmd->port == PORT_MII))
		return -EINVAL;
	if (!(cmd->phy_address == 0x1f))
		return -EINVAL;
	if (!(cmd->transceiver == XCVR_INTERNAL))
		return -EINVAL;
	if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(cmd->advertising & (ADVERTISED_Autoneg
				| ADVERTISED_100baseT_Full
				| ADVERTISED_100baseT_Half
				| ADVERTISED_10baseT_Full
				| ADVERTISED_10baseT_Half)))
			return -EINVAL;

		phy_ctrl = PhyCtrlAne;

		// FIXME: I'm not sure what the original code was trying to do
		if (cmd->advertising & ADVERTISED_Autoneg)
			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
		if (cmd->advertising & ADVERTISED_100baseT_Full)
			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
		if (cmd->advertising & ADVERTISED_100baseT_Half)
			phy_ctrl |= PhyCtrlSpd100;
		if (cmd->advertising & ADVERTISED_10baseT_Full)
			phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
		if (cmd->advertising & ADVERTISED_10baseT_Half)
			phy_ctrl |= PhyCtrlSpd10;
	} else {
		// FIXME: Whole branch guessed
		phy_ctrl = 0;

		if (speed == SPEED_10)
			phy_ctrl |= PhyCtrlSpd10;
		else /* cmd->speed == SPEED_100 */
			phy_ctrl |= PhyCtrlSpd100;

		if (cmd->duplex == DUPLEX_FULL)
			phy_ctrl |= PhyCtrlDux;
	}

	spin_lock_bh(&priv->lock);

	/* keep any PhyCtrl bits we do not manage here */
	old_phy_ctrl = ioread32(port_base + PhyCtrl);
	phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux
			| PhyCtrlSpd100 | PhyCtrlSpd10);
	if (phy_ctrl != old_phy_ctrl)
		iowrite32(phy_ctrl, port_base + PhyCtrl);

	spin_unlock_bh(&priv->lock);

	return 0;
}
/* ethtool get_wol handler: report Wake-on-LAN capabilities and the modes
 * currently enabled, decoded from the chip's PMConfig register.
 */
static void sc92031_ethtool_get_wol(struct net_device *dev,
		struct ethtool_wolinfo *wolinfo)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 pm_config;

	/* Snapshot PMConfig under the lock; decode outside it. */
	spin_lock_bh(&priv->lock);
	pm_config = ioread32(port_base + PMConfig);
	spin_unlock_bh(&priv->lock);

	// FIXME: Guessed
	wolinfo->supported = WAKE_PHY | WAKE_MAGIC
			| WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
	wolinfo->wolopts = 0;

	/* PM_LinkUp -> wake on link change. */
	if (pm_config & PM_LinkUp)
		wolinfo->wolopts |= WAKE_PHY;

	/* PM_Magic -> wake on magic packet. */
	if (pm_config & PM_Magic)
		wolinfo->wolopts |= WAKE_MAGIC;

	/* PM_WakeUp is a single hardware bit covering all frame-directed
	 * wakeups, so it maps onto all three frame wake flags at once. */
	if (pm_config & PM_WakeUp)
		// FIXME: Guessed
		wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
}
/* ethtool set_wol handler: translate the requested Wake-on-LAN modes into
 * PMConfig bits and write them to the hardware, caching the value in
 * priv->pm_config for later reuse.
 *
 * Always returns 0.
 */
static int sc92031_ethtool_set_wol(struct net_device *dev,
		struct ethtool_wolinfo *wolinfo)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 pm_config;

	spin_lock_bh(&priv->lock);

	/* Read-modify-write: clear only the three wake bits we manage,
	 * keeping the rest of PMConfig intact. */
	pm_config = ioread32(port_base + PMConfig)
			& ~(PM_LinkUp | PM_Magic | PM_WakeUp);

	if (wolinfo->wolopts & WAKE_PHY)
		pm_config |= PM_LinkUp;

	if (wolinfo->wolopts & WAKE_MAGIC)
		pm_config |= PM_Magic;

	/* One hardware bit covers unicast/multicast/broadcast wakeups.
	 * FIXME: Guessed
	 */
	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
		pm_config |= PM_WakeUp;

	priv->pm_config = pm_config;
	iowrite32(pm_config, port_base + PMConfig);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	return 0;
}
/* ethtool nway_reset handler: restart autonegotiation via the MII BMCR
 * register.
 *
 * Returns 0 on success, -EINVAL if autonegotiation is not currently
 * enabled (BMCR_ANENABLE clear).
 */
static int sc92031_ethtool_nway_reset(struct net_device *dev)
{
	int err = 0;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u16 bmcr;

	spin_lock_bh(&priv->lock);

	bmcr = _sc92031_mii_read(port_base, MII_BMCR);
	if (!(bmcr & BMCR_ANENABLE)) {
		err = -EINVAL;
		goto out;
	}

	_sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART);

out:
	/* The MII scan runs on both success and error paths. */
	_sc92031_mii_scan(port_base);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	return err;
}
/* Names for the driver-private statistics reported through ethtool -S;
 * order must match the data[] layout in
 * sc92031_ethtool_get_ethtool_stats(). */
static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = {
	"tx_timeout",
	"rx_loss",
};
1346 static void sc92031_ethtool_get_strings(struct net_device *dev,
1347 u32 stringset, u8 *data)
1349 if (stringset == ETH_SS_STATS)
1350 memcpy(data, sc92031_ethtool_stats_strings,
1351 SILAN_STATS_NUM * ETH_GSTRING_LEN);
1354 static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset)
1356 switch (sset) {
1357 case ETH_SS_STATS:
1358 return SILAN_STATS_NUM;
1359 default:
1360 return -EOPNOTSUPP;
/* ethtool get_ethtool_stats handler: copy the driver-private counters into
 * data[], in the same order as sc92031_ethtool_stats_strings. The lock
 * guarantees a consistent snapshot of both counters.
 */
static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);
	data[0] = priv->tx_timeouts;
	data[1] = priv->rx_loss;
	spin_unlock_bh(&priv->lock);
}
/* ethtool entry points exposed by this driver. */
static const struct ethtool_ops sc92031_ethtool_ops = {
	.get_settings		= sc92031_ethtool_get_settings,
	.set_settings		= sc92031_ethtool_set_settings,
	.get_wol		= sc92031_ethtool_get_wol,
	.set_wol		= sc92031_ethtool_set_wol,
	.nway_reset		= sc92031_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_strings		= sc92031_ethtool_get_strings,
	.get_sset_count		= sc92031_ethtool_get_sset_count,
	.get_ethtool_stats	= sc92031_ethtool_get_ethtool_stats,
};
/* Network device entry points; generic eth_* helpers are used where the
 * hardware needs no special handling (MTU change, address validation,
 * MAC address set). */
static const struct net_device_ops sc92031_netdev_ops = {
	.ndo_get_stats		= sc92031_get_stats,
	.ndo_start_xmit		= sc92031_start_xmit,
	.ndo_open		= sc92031_open,
	.ndo_stop		= sc92031_stop,
	.ndo_set_multicast_list	= sc92031_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_tx_timeout		= sc92031_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sc92031_poll_controller,
#endif
};
/* PCI probe: enable the device, map BAR SC92031_USE_BAR, allocate and
 * register the net_device, read the permanent MAC address from the chip
 * and enable PCI PM wakeup.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto-cleanup ladder at the end.
 */
static int __devinit sc92031_probe(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	int err;
	void __iomem* port_base;
	struct net_device *dev;
	struct sc92031_priv *priv;
	u32 mac0, mac1;
	unsigned long base_addr;

	err = pci_enable_device(pdev);
	if (unlikely(err < 0))
		goto out_enable_device;

	pci_set_master(pdev);

	/* Device only does 32-bit DMA, for both streaming and coherent
	 * mappings. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (unlikely(err < 0))
		goto out_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (unlikely(err < 0))
		goto out_set_dma_mask;

	err = pci_request_regions(pdev, SC92031_NAME);
	if (unlikely(err < 0))
		goto out_request_regions;

	port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
	if (unlikely(!port_base)) {
		err = -EIO;
		goto out_iomap;
	}

	dev = alloc_etherdev(sizeof(struct sc92031_priv));
	if (unlikely(!dev)) {
		err = -ENOMEM;
		goto out_alloc_etherdev;
	}

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* BAR 0 is MMIO, BAR 1 is PIO; record the resource accordingly. */
#if SC92031_USE_BAR == 0
	dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
	dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
#elif SC92031_USE_BAR == 1
	dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
#endif
	dev->irq = pdev->irq;

	/* faked with skb_copy_and_csum_dev */
	dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	dev->netdev_ops = &sc92031_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &sc92031_ethtool_ops;

	priv = netdev_priv(dev);
	spin_lock_init(&priv->lock);
	priv->port_base = port_base;
	priv->pdev = pdev;
	tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
	/* Fudge tasklet count so the call to sc92031_enable_interrupts at
	 * sc92031_open will work correctly */
	tasklet_disable_nosync(&priv->tasklet);

	/* PCI PM Wakeup */
	iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig);

	/* The permanent MAC address lives big-endian in the MAC0/MAC0+4
	 * register pair. */
	mac0 = ioread32(port_base + MAC0);
	mac1 = ioread32(port_base + MAC0 + 4);
	dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
	dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
	dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
	dev->dev_addr[3] = dev->perm_addr[3] = mac0;
	dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
	dev->dev_addr[5] = dev->perm_addr[5] = mac1;

	err = register_netdev(dev);
	if (err < 0)
		goto out_register_netdev;

#if SC92031_USE_BAR == 0
	base_addr = dev->mem_start;
#elif SC92031_USE_BAR == 1
	base_addr = dev->base_addr;
#endif
	printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
	       base_addr, dev->dev_addr, dev->irq);

	return 0;

	/* Unwind in reverse acquisition order. */
out_register_netdev:
	free_netdev(dev);
out_alloc_etherdev:
	pci_iounmap(pdev, port_base);
out_iomap:
	pci_release_regions(pdev);
out_request_regions:
out_set_dma_mask:
	pci_disable_device(pdev);
out_enable_device:
	return err;
}
1510 static void __devexit sc92031_remove(struct pci_dev *pdev)
1512 struct net_device *dev = pci_get_drvdata(pdev);
1513 struct sc92031_priv *priv = netdev_priv(dev);
1514 void __iomem* port_base = priv->port_base;
1516 unregister_netdev(dev);
1517 free_netdev(dev);
1518 pci_iounmap(pdev, port_base);
1519 pci_release_regions(pdev);
1520 pci_disable_device(pdev);
/* PCI suspend: save PCI config state, quiesce the interface if it is
 * running (detach, mask interrupts, stop Tx/Rx, drop pending Tx), then
 * move the device to the requested low-power state.
 *
 * Always returns 0.
 */
static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);

	pci_save_state(pdev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	/* Disable interrupts, stop Tx and Rx. */
	sc92031_disable_interrupts(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_disable_tx_rx(dev);
	_sc92031_tx_clear(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);

out:
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
/* PCI resume: restore PCI config state and full power, and if the
 * interface was running, reset the chip, re-enable interrupts, reattach
 * the device and restart the Tx queue according to carrier state.
 *
 * Always returns 0.
 */
static int sc92031_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	if (!netif_running(dev))
		goto out;

	/* Interrupts already disabled by sc92031_suspend */
	spin_lock_bh(&priv->lock);

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
	sc92031_enable_interrupts(dev);

	netif_device_attach(dev);

	/* Only wake the queue when the link is actually up. */
	if (netif_carrier_ok(dev))
		netif_wake_queue(dev);
	else
		netif_tx_disable(dev);

out:
	return 0;
}
/* PCI IDs handled by this driver: Silan's SC92031 and its 8139-compatible
 * variant, plus a third-party (0x1088) clone of the 2031. */
static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
	{ PCI_DEVICE(0x1088, 0x2031) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
/* PCI driver descriptor tying the ID table to the probe/remove and
 * suspend/resume callbacks above. */
static struct pci_driver sc92031_pci_driver = {
	.name		= SC92031_NAME,
	.id_table	= sc92031_pci_device_id_table,
	.probe		= sc92031_probe,
	.remove		= __devexit_p(sc92031_remove),
	.suspend	= sc92031_suspend,
	.resume		= sc92031_resume,
};
/* Module init: register the PCI driver. */
static int __init sc92031_init(void)
{
	return pci_register_driver(&sc92031_pci_driver);
}
/* Module exit: unregister the PCI driver. */
static void __exit sc92031_exit(void)
{
	pci_unregister_driver(&sc92031_pci_driver);
}
module_init(sc92031_init);
module_exit(sc92031_exit);

/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");