1 /*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
20 */
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/irq.h>
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 printk(arg)
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 printk(arg)
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
40 printk(arg)
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 printk(arg)
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 printk(arg)
46 #define PHY_MAX_ADDR 32
47 #define PHY_ID_ANY 0x1f
48 #define MII_REG_ANY 0x1f
50 #define DRV_VERSION "1.3"
51 #define DRV_NAME "sis190"
52 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
53 #define PFX DRV_NAME ": "
55 #define sis190_rx_skb netif_rx
56 #define sis190_rx_quota(count, quota) count
58 #define MAC_ADDR_LEN 6
60 #define NUM_TX_DESC 64 /* [8..1024] */
61 #define NUM_RX_DESC 64 /* [8..8192] */
62 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
63 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
64 #define RX_BUF_SIZE 1536
65 #define RX_BUF_MASK 0xfff8
67 #define SIS190_REGS_SIZE 0x80
68 #define SIS190_TX_TIMEOUT (6*HZ)
69 #define SIS190_PHY_TIMEOUT (10*HZ)
70 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
71 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
72 NETIF_MSG_IFDOWN)
74 /* Enhanced PHY access register bit definitions */
75 #define EhnMIIread 0x0000
76 #define EhnMIIwrite 0x0020
77 #define EhnMIIdataShift 16
78 #define EhnMIIpmdShift 6 /* 7016 only */
79 #define EhnMIIregShift 11
80 #define EhnMIIreq 0x0010
81 #define EhnMIInotDone 0x0010
83 /* Write/read MMIO register */
84 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
85 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
86 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
87 #define SIS_R8(reg) readb (ioaddr + (reg))
88 #define SIS_R16(reg) readw (ioaddr + (reg))
89 #define SIS_R32(reg) readl (ioaddr + (reg))
91 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
93 enum sis190_registers {
94 TxControl = 0x00,
95 TxDescStartAddr = 0x04,
96 rsv0 = 0x08, // reserved
97 TxSts = 0x0c, // unused (Control/Status)
98 RxControl = 0x10,
99 RxDescStartAddr = 0x14,
100 rsv1 = 0x18, // reserved
101 RxSts = 0x1c, // unused
102 IntrStatus = 0x20,
103 IntrMask = 0x24,
104 IntrControl = 0x28,
105 IntrTimer = 0x2c, // unused (Interrupt Timer)
106 PMControl = 0x30, // unused (Power Mgmt Control/Status)
107 rsv2 = 0x34, // reserved
108 ROMControl = 0x38,
109 ROMInterface = 0x3c,
110 StationControl = 0x40,
111 GMIIControl = 0x44,
112 GIoCR = 0x48, // unused (GMAC IO Compensation)
113 GIoCtrl = 0x4c, // unused (GMAC IO Control)
114 TxMacControl = 0x50,
115 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
116 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
117 rsv3 = 0x5c, // reserved
118 RxMacControl = 0x60,
119 RxMacAddr = 0x62,
120 RxHashTable = 0x68,
121 // Undocumented = 0x6c,
122 RxWolCtrl = 0x70,
123 RxWolData = 0x74, // unused (Rx WOL Data Access)
124 RxMPSControl = 0x78, // unused (Rx MPS Control)
125 rsv4 = 0x7c, // reserved
126 };
128 enum sis190_register_content {
129 /* IntrStatus */
130 SoftInt = 0x40000000, // unused
131 Timeup = 0x20000000, // unused
132 PauseFrame = 0x00080000, // unused
133 MagicPacket = 0x00040000, // unused
134 WakeupFrame = 0x00020000, // unused
135 LinkChange = 0x00010000,
136 RxQEmpty = 0x00000080,
137 RxQInt = 0x00000040,
138 TxQ1Empty = 0x00000020, // unused
139 TxQ1Int = 0x00000010,
140 TxQ0Empty = 0x00000008, // unused
141 TxQ0Int = 0x00000004,
142 RxHalt = 0x00000002,
143 TxHalt = 0x00000001,
145 /* {Rx/Tx}CmdBits */
146 CmdReset = 0x10,
147 CmdRxEnb = 0x08, // unused
148 CmdTxEnb = 0x01,
149 RxBufEmpty = 0x01, // unused
151 /* Cfg9346Bits */
152 Cfg9346_Lock = 0x00, // unused
153 Cfg9346_Unlock = 0xc0, // unused
155 /* RxMacControl */
156 AcceptErr = 0x20, // unused
157 AcceptRunt = 0x10, // unused
158 AcceptBroadcast = 0x0800,
159 AcceptMulticast = 0x0400,
160 AcceptMyPhys = 0x0200,
161 AcceptAllPhys = 0x0100,
163 /* RxConfigBits */
164 RxCfgFIFOShift = 13,
165 RxCfgDMAShift = 8, // 0x1a in RxControl ?
167 /* TxConfigBits */
168 TxInterFrameGapShift = 24,
169 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
171 LinkStatus = 0x02, // unused
172 FullDup = 0x01, // unused
174 /* TBICSRBit */
175 TBILinkOK = 0x02000000, // unused
176 };
178 struct TxDesc {
179 __le32 PSize;
180 __le32 status;
181 __le32 addr;
182 __le32 size;
183 };
185 struct RxDesc {
186 __le32 PSize;
187 __le32 status;
188 __le32 addr;
189 __le32 size;
190 };
192 enum _DescStatusBit {
193 /* _Desc.status */
194 OWNbit = 0x80000000, // RXOWN/TXOWN
195 INTbit = 0x40000000, // RXINT/TXINT
196 CRCbit = 0x00020000, // CRCOFF/CRCEN
197 PADbit = 0x00010000, // PREADD/PADEN
198 /* _Desc.size */
199 RingEnd = 0x80000000,
200 /* TxDesc.status */
201 LSEN = 0x08000000, // TSO ? -- FR
202 IPCS = 0x04000000,
203 TCPCS = 0x02000000,
204 UDPCS = 0x01000000,
205 BSTEN = 0x00800000,
206 EXTEN = 0x00400000,
207 DEFEN = 0x00200000,
208 BKFEN = 0x00100000,
209 CRSEN = 0x00080000,
210 COLEN = 0x00040000,
211 THOL3 = 0x30000000,
212 THOL2 = 0x20000000,
213 THOL1 = 0x10000000,
214 THOL0 = 0x00000000,
216 WND = 0x00080000,
217 TABRT = 0x00040000,
218 FIFO = 0x00020000,
219 LINK = 0x00010000,
220 ColCountMask = 0x0000ffff,
221 /* RxDesc.status */
222 IPON = 0x20000000,
223 TCPON = 0x10000000,
224 UDPON = 0x08000000,
225 Wakup = 0x00400000,
226 Magic = 0x00200000,
227 Pause = 0x00100000,
228 DEFbit = 0x00200000,
229 BCAST = 0x000c0000,
230 MCAST = 0x00080000,
231 UCAST = 0x00040000,
232 /* RxDesc.PSize */
233 TAGON = 0x80000000,
234 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
235 ABORT = 0x00800000,
236 SHORT = 0x00400000,
237 LIMIT = 0x00200000,
238 MIIER = 0x00100000,
239 OVRUN = 0x00080000,
240 NIBON = 0x00040000,
241 COLON = 0x00020000,
242 CRCOK = 0x00010000,
243 RxSizeMask = 0x0000ffff
244 };
246 /*
247  * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
248  * provide two (unused with Linux) Tx queues. No publicly
249  * available documentation alas.
250  */
251 enum sis190_eeprom_access_register_bits {
252 EECS = 0x00000001, // unused
253 EECLK = 0x00000002, // unused
254 EEDO = 0x00000008, // unused
255 EEDI = 0x00000004, // unused
256 EEREQ = 0x00000080,
257 EEROP = 0x00000200,
258 EEWOP = 0x00000100 // unused
259 };
261 /* EEPROM Addresses */
262 enum sis190_eeprom_address {
263 EEPROMSignature = 0x00,
264 EEPROMCLK = 0x01, // unused
265 EEPROMInfo = 0x02,
266 EEPROMMACAddr = 0x03
267 };
269 enum sis190_feature {
270 F_HAS_RGMII = 1,
271 F_PHY_88E1111 = 2,
272 F_PHY_BCM5461 = 4
273 };
275 struct sis190_private {
276 void __iomem *mmio_addr;
277 struct pci_dev *pci_dev;
278 struct net_device *dev;
279 spinlock_t lock;
280 u32 rx_buf_sz;
281 u32 cur_rx;
282 u32 cur_tx;
283 u32 dirty_rx;
284 u32 dirty_tx;
285 dma_addr_t rx_dma;
286 dma_addr_t tx_dma;
287 struct RxDesc *RxDescRing;
288 struct TxDesc *TxDescRing;
289 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
290 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
291 struct work_struct phy_task;
292 struct timer_list timer;
293 u32 msg_enable;
294 struct mii_if_info mii_if;
295 struct list_head first_phy;
296 u32 features;
297 };
299 struct sis190_phy {
300 struct list_head list;
301 int phy_id;
302 u16 id[2];
303 u16 status;
304 u8 type;
305 };
307 enum sis190_phy_type {
308 UNKNOWN = 0x00,
309 HOME = 0x01,
310 LAN = 0x02,
311 MIX = 0x03
312 };
314 static struct mii_chip_info {
315 const char *name;
316 u16 id[2];
317 unsigned int type;
318 u32 feature;
319 } mii_chip_table[] = {
320 { "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 },
321 { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
322 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
323 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
324 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
325 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
326 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
327 { NULL, }
328 };
330 static const struct {
331 const char *name;
332 } sis_chip_info[] = {
333 { "SiS 190 PCI Fast Ethernet adapter" },
334 { "SiS 191 PCI Gigabit Ethernet adapter" },
335 };
337 static struct pci_device_id sis190_pci_tbl[] = {
338 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
339 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
340 { 0, },
341 };
343 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
345 static int rx_copybreak = 200;
347 static struct {
348 u32 msg_enable;
349 } debug = { -1 };
351 MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
352 module_param(rx_copybreak, int, 0);
353 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
354 module_param_named(debug, debug.msg_enable, int, 0);
355 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
356 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
357 MODULE_VERSION(DRV_VERSION);
358 MODULE_LICENSE("GPL");
360 static const u32 sis190_intr_mask =
361 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
363 /*
364  * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
365  * The chips use a 64 element hash table based on the Ethernet CRC.
366  */
367 static const int multicast_filter_limit = 32;
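/*
 * PHY access goes through the GMIIControl register: __mdio_cmd() writes the
 * command word and polls (up to roughly 100 ms) for the chip to clear
 * EhnMIInotDone, complaining if the command never completes.
 */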
369 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
371 unsigned int i;
373 SIS_W32(GMIIControl, ctl);
375 msleep(1);
377 for (i = 0; i < 100; i++) {
378 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
379 break;
380 msleep(1);
383 if (i > 99)
384 printk(KERN_ERR PFX "PHY command failed !\n");
387 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
389 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
390 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
391 (((u32) val) << EhnMIIdataShift));
394 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
396 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
397 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
399 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
402 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
404 struct sis190_private *tp = netdev_priv(dev);
406 mdio_write(tp->mmio_addr, phy_id, reg, val);
409 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
411 struct sis190_private *tp = netdev_priv(dev);
413 return mdio_read(tp->mmio_addr, phy_id, reg);
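/*
 * Read the register twice so that latched, self-clearing bits (e.g. the BMSR
 * link status) report the current state rather than a stale latched value.
 */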
416 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
418 mdio_read(ioaddr, phy_id, reg);
419 return mdio_read(ioaddr, phy_id, reg);
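/*
 * Read one 16-bit word from the EEPROM through the ROMInterface register,
 * polling until the chip clears EEREQ. Returns 0 when no EEPROM is present
 * (ROMControl bit 1 clear) and 0xffff on timeout.
 */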
422 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
424 u16 data = 0xffff;
425 unsigned int i;
427 if (!(SIS_R32(ROMControl) & 0x0002))
428 return 0;
430 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
432 for (i = 0; i < 200; i++) {
433 if (!(SIS_R32(ROMInterface) & EEREQ)) {
434 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
435 break;
437 msleep(1);
440 return data;
443 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
445 SIS_W32(IntrMask, 0x00);
446 SIS_W32(IntrStatus, 0xffffffff);
447 SIS_PCI_COMMIT();
450 static void sis190_asic_down(void __iomem *ioaddr)
452 /* Stop the chip's Tx and Rx DMA processes. */
454 SIS_W32(TxControl, 0x1a00);
455 SIS_W32(RxControl, 0x1a00);
457 sis190_irq_mask_and_ack(ioaddr);
460 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
462 desc->size |= cpu_to_le32(RingEnd);
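/*
 * Return an Rx descriptor to the hardware: reset PSize, restore the buffer
 * size (preserving the RingEnd marker) and, after a write barrier, set
 * OWNbit/INTbit so the chip may fill it again.
 */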
465 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
467 u32 eor = le32_to_cpu(desc->size) & RingEnd;
469 desc->PSize = 0x0;
470 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
471 wmb();
472 desc->status = cpu_to_le32(OWNbit | INTbit);
475 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
476 u32 rx_buf_sz)
478 desc->addr = cpu_to_le32(mapping);
479 sis190_give_to_asic(desc, rx_buf_sz);
482 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
484 desc->PSize = 0x0;
485 desc->addr = cpu_to_le32(0xdeadbeef);
486 desc->size &= cpu_to_le32(RingEnd);
487 wmb();
488 desc->status = 0x0;
491 static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
492 struct RxDesc *desc)
494 u32 rx_buf_sz = tp->rx_buf_sz;
495 struct sk_buff *skb;
497 skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
498 if (likely(skb)) {
499 dma_addr_t mapping;
501 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
502 PCI_DMA_FROMDEVICE);
503 sis190_map_to_asic(desc, mapping, rx_buf_sz);
504 } else
505 sis190_make_unusable_by_asic(desc);
507 return skb;
510 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
511 u32 start, u32 end)
513 u32 cur;
515 for (cur = start; cur < end; cur++) {
516 unsigned int i = cur % NUM_RX_DESC;
518 if (tp->Rx_skbuff[i])
519 continue;
521 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
523 if (!tp->Rx_skbuff[i])
524 break;
526 return cur - start;
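/*
 * For frames shorter than rx_copybreak, copy the data into a small freshly
 * allocated skb so the original, full-sized DMA buffer can be handed straight
 * back to the hardware.
 */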
529 static bool sis190_try_rx_copy(struct sis190_private *tp,
530 struct sk_buff **sk_buff, int pkt_size,
531 dma_addr_t addr)
533 struct sk_buff *skb;
534 bool done = false;
536 if (pkt_size >= rx_copybreak)
537 goto out;
539 skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
540 if (!skb)
541 goto out;
543 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
544 PCI_DMA_FROMDEVICE);
545 skb_reserve(skb, 2);
546 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
547 *sk_buff = skb;
548 done = true;
549 out:
550 return done;
553 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
555 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
557 if ((status & CRCOK) && !(status & ErrMask))
558 return 0;
560 if (!(status & CRCOK))
561 stats->rx_crc_errors++;
562 else if (status & OVRUN)
563 stats->rx_over_errors++;
564 else if (status & (SHORT | LIMIT))
565 stats->rx_length_errors++;
566 else if (status & (MIIER | NIBON | COLON))
567 stats->rx_frame_errors++;
569 stats->rx_errors++;
570 return -1;
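/*
 * Walk the Rx ring until a descriptor still owned by the hardware is found,
 * pass good frames up the stack, recycle or unmap the descriptors and refill
 * the ring.
 */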
573 static int sis190_rx_interrupt(struct net_device *dev,
574 struct sis190_private *tp, void __iomem *ioaddr)
576 struct net_device_stats *stats = &dev->stats;
577 u32 rx_left, cur_rx = tp->cur_rx;
578 u32 delta, count;
580 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
581 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
583 for (; rx_left > 0; rx_left--, cur_rx++) {
584 unsigned int entry = cur_rx % NUM_RX_DESC;
585 struct RxDesc *desc = tp->RxDescRing + entry;
586 u32 status;
588 if (le32_to_cpu(desc->status) & OWNbit)
589 break;
591 status = le32_to_cpu(desc->PSize);
593 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
594 // status);
596 if (sis190_rx_pkt_err(status, stats) < 0)
597 sis190_give_to_asic(desc, tp->rx_buf_sz);
598 else {
599 struct sk_buff *skb = tp->Rx_skbuff[entry];
600 dma_addr_t addr = le32_to_cpu(desc->addr);
601 int pkt_size = (status & RxSizeMask) - 4;
602 struct pci_dev *pdev = tp->pci_dev;
604 if (unlikely(pkt_size > tp->rx_buf_sz)) {
605 net_intr(tp, KERN_INFO
606 "%s: (frag) status = %08x.\n",
607 dev->name, status);
608 stats->rx_dropped++;
609 stats->rx_length_errors++;
610 sis190_give_to_asic(desc, tp->rx_buf_sz);
611 continue;
615 if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
616 pci_dma_sync_single_for_device(pdev, addr,
617 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
618 sis190_give_to_asic(desc, tp->rx_buf_sz);
619 } else {
620 pci_unmap_single(pdev, addr, tp->rx_buf_sz,
621 PCI_DMA_FROMDEVICE);
622 tp->Rx_skbuff[entry] = NULL;
623 sis190_make_unusable_by_asic(desc);
626 skb_put(skb, pkt_size);
627 skb->protocol = eth_type_trans(skb, dev);
629 sis190_rx_skb(skb);
631 stats->rx_packets++;
632 stats->rx_bytes += pkt_size;
633 if ((status & BCAST) == MCAST)
634 stats->multicast++;
637 count = cur_rx - tp->cur_rx;
638 tp->cur_rx = cur_rx;
640 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
641 if (!delta && count && netif_msg_intr(tp))
642 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
643 tp->dirty_rx += delta;
645 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
646 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
648 return count;
651 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
652 struct TxDesc *desc)
654 unsigned int len;
656 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
658 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
660 memset(desc, 0x00, sizeof(*desc));
663 static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
665 #define TxErrMask (WND | TABRT | FIFO | LINK)
667 if (!unlikely(status & TxErrMask))
668 return 0;
670 if (status & WND)
671 stats->tx_window_errors++;
672 if (status & TABRT)
673 stats->tx_aborted_errors++;
674 if (status & FIFO)
675 stats->tx_fifo_errors++;
676 if (status & LINK)
677 stats->tx_carrier_errors++;
679 stats->tx_errors++;
681 return -1;
684 static void sis190_tx_interrupt(struct net_device *dev,
685 struct sis190_private *tp, void __iomem *ioaddr)
687 struct net_device_stats *stats = &dev->stats;
688 u32 pending, dirty_tx = tp->dirty_tx;
689 /*
690  * It would not be needed if queueing was allowed to be enabled
691  * again too early (hint: think preempt and unclocked smp systems).
692  */
693 unsigned int queue_stopped;
695 smp_rmb();
696 pending = tp->cur_tx - dirty_tx;
697 queue_stopped = (pending == NUM_TX_DESC);
699 for (; pending; pending--, dirty_tx++) {
700 unsigned int entry = dirty_tx % NUM_TX_DESC;
701 struct TxDesc *txd = tp->TxDescRing + entry;
702 u32 status = le32_to_cpu(txd->status);
703 struct sk_buff *skb;
705 if (status & OWNbit)
706 break;
708 skb = tp->Tx_skbuff[entry];
710 if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
711 stats->tx_packets++;
712 stats->tx_bytes += skb->len;
713 stats->collisions += ((status & ColCountMask) - 1);
716 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
717 tp->Tx_skbuff[entry] = NULL;
718 dev_kfree_skb_irq(skb);
721 if (tp->dirty_tx != dirty_tx) {
722 tp->dirty_tx = dirty_tx;
723 smp_wmb();
724 if (queue_stopped)
725 netif_wake_queue(dev);
729 /*
730  * The interrupt handler does all of the Rx thread work and cleans up after
731  * the Tx thread.
732  */
733 static irqreturn_t sis190_interrupt(int irq, void *__dev)
735 struct net_device *dev = __dev;
736 struct sis190_private *tp = netdev_priv(dev);
737 void __iomem *ioaddr = tp->mmio_addr;
738 unsigned int handled = 0;
739 u32 status;
741 status = SIS_R32(IntrStatus);
743 if ((status == 0xffffffff) || !status)
744 goto out;
746 handled = 1;
748 if (unlikely(!netif_running(dev))) {
749 sis190_asic_down(ioaddr);
750 goto out;
753 SIS_W32(IntrStatus, status);
755 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
757 if (status & LinkChange) {
758 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
759 schedule_work(&tp->phy_task);
762 if (status & RxQInt)
763 sis190_rx_interrupt(dev, tp, ioaddr);
765 if (status & TxQ0Int)
766 sis190_tx_interrupt(dev, tp, ioaddr);
767 out:
768 return IRQ_RETVAL(handled);
771 #ifdef CONFIG_NET_POLL_CONTROLLER
772 static void sis190_netpoll(struct net_device *dev)
774 struct sis190_private *tp = netdev_priv(dev);
775 struct pci_dev *pdev = tp->pci_dev;
777 disable_irq(pdev->irq);
778 sis190_interrupt(pdev->irq, dev);
779 enable_irq(pdev->irq);
781 #endif
783 static void sis190_free_rx_skb(struct sis190_private *tp,
784 struct sk_buff **sk_buff, struct RxDesc *desc)
786 struct pci_dev *pdev = tp->pci_dev;
788 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
789 PCI_DMA_FROMDEVICE);
790 dev_kfree_skb(*sk_buff);
791 *sk_buff = NULL;
792 sis190_make_unusable_by_asic(desc);
795 static void sis190_rx_clear(struct sis190_private *tp)
797 unsigned int i;
799 for (i = 0; i < NUM_RX_DESC; i++) {
800 if (!tp->Rx_skbuff[i])
801 continue;
802 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
806 static void sis190_init_ring_indexes(struct sis190_private *tp)
808 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
811 static int sis190_init_ring(struct net_device *dev)
813 struct sis190_private *tp = netdev_priv(dev);
815 sis190_init_ring_indexes(tp);
817 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
818 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
820 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
821 goto err_rx_clear;
823 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
825 return 0;
827 err_rx_clear:
828 sis190_rx_clear(tp);
829 return -ENOMEM;
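/*
 * Program the Rx filter: promiscuous/all-multicast modes accept everything;
 * otherwise each multicast address selects (via ether_crc()) one of the 64
 * RxHashTable bits.
 */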
832 static void sis190_set_rx_mode(struct net_device *dev)
834 struct sis190_private *tp = netdev_priv(dev);
835 void __iomem *ioaddr = tp->mmio_addr;
836 unsigned long flags;
837 u32 mc_filter[2]; /* Multicast hash filter */
838 u16 rx_mode;
840 if (dev->flags & IFF_PROMISC) {
841 rx_mode =
842 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
843 AcceptAllPhys;
844 mc_filter[1] = mc_filter[0] = 0xffffffff;
845 } else if ((dev->mc_count > multicast_filter_limit) ||
846 (dev->flags & IFF_ALLMULTI)) {
847 /* Too many to filter perfectly -- accept all multicasts. */
848 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
849 mc_filter[1] = mc_filter[0] = 0xffffffff;
850 } else {
851 struct dev_mc_list *mclist;
852 unsigned int i;
854 rx_mode = AcceptBroadcast | AcceptMyPhys;
855 mc_filter[1] = mc_filter[0] = 0;
856 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
857 i++, mclist = mclist->next) {
858 int bit_nr =
859 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
860 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
861 rx_mode |= AcceptMulticast;
865 spin_lock_irqsave(&tp->lock, flags);
867 SIS_W16(RxMacControl, rx_mode | 0x2);
868 SIS_W32(RxHashTable, mc_filter[0]);
869 SIS_W32(RxHashTable + 4, mc_filter[1]);
871 spin_unlock_irqrestore(&tp->lock, flags);
874 static void sis190_soft_reset(void __iomem *ioaddr)
876 SIS_W32(IntrControl, 0x8000);
877 SIS_PCI_COMMIT();
878 SIS_W32(IntrControl, 0x0);
879 sis190_asic_down(ioaddr);
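/*
 * Bring the chip up: soft reset, program the descriptor ring base addresses,
 * reset MAC/filter/WOL state, enable the interrupt mask and start the Tx and
 * Rx engines.
 */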
882 static void sis190_hw_start(struct net_device *dev)
884 struct sis190_private *tp = netdev_priv(dev);
885 void __iomem *ioaddr = tp->mmio_addr;
887 sis190_soft_reset(ioaddr);
889 SIS_W32(TxDescStartAddr, tp->tx_dma);
890 SIS_W32(RxDescStartAddr, tp->rx_dma);
892 SIS_W32(IntrStatus, 0xffffffff);
893 SIS_W32(IntrMask, 0x0);
894 SIS_W32(GMIIControl, 0x0);
895 SIS_W32(TxMacControl, 0x60);
896 SIS_W16(RxMacControl, 0x02);
897 SIS_W32(RxHashTable, 0x0);
898 SIS_W32(0x6c, 0x0);
899 SIS_W32(RxWolCtrl, 0x0);
900 SIS_W32(RxWolData, 0x0);
902 SIS_PCI_COMMIT();
904 sis190_set_rx_mode(dev);
906 /* Enable all known interrupts by setting the interrupt mask. */
907 SIS_W32(IntrMask, sis190_intr_mask);
909 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
910 SIS_W32(RxControl, 0x1a1d);
912 netif_start_queue(dev);
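/*
 * PHY worker, run on link-change interrupts and from the periodic timer:
 * wait for reset/autonegotiation to finish, then derive speed and duplex from
 * the link partner ability and program StationControl to match.
 */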
915 static void sis190_phy_task(struct work_struct *work)
917 struct sis190_private *tp =
918 container_of(work, struct sis190_private, phy_task);
919 struct net_device *dev = tp->dev;
920 void __iomem *ioaddr = tp->mmio_addr;
921 int phy_id = tp->mii_if.phy_id;
922 u16 val;
924 rtnl_lock();
926 if (!netif_running(dev))
927 goto out_unlock;
929 val = mdio_read(ioaddr, phy_id, MII_BMCR);
930 if (val & BMCR_RESET) {
931 // FIXME: needlessly high ? -- FR 02/07/2005
932 mod_timer(&tp->timer, jiffies + HZ/10);
933 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
934 BMSR_ANEGCOMPLETE)) {
935 netif_carrier_off(dev);
936 net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
937 dev->name);
938 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
939 } else {
940 /* Rejoice ! */
941 struct {
942 int val;
943 u32 ctl;
944 const char *msg;
945 } reg31[] = {
946 { LPA_1000FULL, 0x07000c00 | 0x00001000,
947 "1000 Mbps Full Duplex" },
948 { LPA_1000HALF, 0x07000c00,
949 "1000 Mbps Half Duplex" },
950 { LPA_100FULL, 0x04000800 | 0x00001000,
951 "100 Mbps Full Duplex" },
952 { LPA_100HALF, 0x04000800,
953 "100 Mbps Half Duplex" },
954 { LPA_10FULL, 0x04000400 | 0x00001000,
955 "10 Mbps Full Duplex" },
956 { LPA_10HALF, 0x04000400,
957 "10 Mbps Half Duplex" },
958 { 0, 0x04000400, "unknown" }
959 }, *p = NULL;
960 u16 adv, autoexp, gigadv, gigrec;
962 val = mdio_read(ioaddr, phy_id, 0x1f);
963 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
965 val = mdio_read(ioaddr, phy_id, MII_LPA);
966 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
967 autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
968 net_link(tp, KERN_INFO "%s: mii lpa=%04x adv=%04x exp=%04x.\n",
969 dev->name, val, adv, autoexp);
971 if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
972 /* check for gigabit speed */
973 gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
974 gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
975 val = (gigadv & (gigrec >> 2));
976 if (val & ADVERTISE_1000FULL)
977 p = reg31;
978 else if (val & ADVERTISE_1000HALF)
979 p = reg31 + 1;
981 if (!p) {
982 val &= adv;
984 for (p = reg31; p->val; p++) {
985 if ((val & p->val) == p->val)
986 break;
990 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
992 if ((tp->features & F_HAS_RGMII) &&
993 (tp->features & F_PHY_BCM5461)) {
994 // Set Tx Delay in RGMII mode.
995 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
996 udelay(200);
997 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
998 p->ctl |= 0x03000000;
1001 SIS_W32(StationControl, p->ctl);
1003 if (tp->features & F_HAS_RGMII) {
1004 SIS_W32(RGDelay, 0x0441);
1005 SIS_W32(RGDelay, 0x0440);
1008 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
1009 p->msg);
1010 netif_carrier_on(dev);
1013 out_unlock:
1014 rtnl_unlock();
1017 static void sis190_phy_timer(unsigned long __opaque)
1019 struct net_device *dev = (struct net_device *)__opaque;
1020 struct sis190_private *tp = netdev_priv(dev);
1022 if (likely(netif_running(dev)))
1023 schedule_work(&tp->phy_task);
1026 static inline void sis190_delete_timer(struct net_device *dev)
1028 struct sis190_private *tp = netdev_priv(dev);
1030 del_timer_sync(&tp->timer);
1033 static inline void sis190_request_timer(struct net_device *dev)
1035 struct sis190_private *tp = netdev_priv(dev);
1036 struct timer_list *timer = &tp->timer;
1038 init_timer(timer);
1039 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1040 timer->data = (unsigned long)dev;
1041 timer->function = sis190_phy_timer;
1042 add_timer(timer);
1045 static void sis190_set_rxbufsize(struct sis190_private *tp,
1046 struct net_device *dev)
1048 unsigned int mtu = dev->mtu;
1050 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1051 /* RxDesc->size has a licence to kill the lower bits */
1052 if (tp->rx_buf_sz & 0x07) {
1053 tp->rx_buf_sz += 8;
1054 tp->rx_buf_sz &= RX_BUF_MASK;
1058 static int sis190_open(struct net_device *dev)
1060 struct sis190_private *tp = netdev_priv(dev);
1061 struct pci_dev *pdev = tp->pci_dev;
1062 int rc = -ENOMEM;
1064 sis190_set_rxbufsize(tp, dev);
1066 /*
1067  * Rx and Tx descriptors need 256 bytes alignment.
1068  * pci_alloc_consistent() guarantees a stronger alignment.
1069  */
1070 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1071 if (!tp->TxDescRing)
1072 goto out;
1074 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1075 if (!tp->RxDescRing)
1076 goto err_free_tx_0;
1078 rc = sis190_init_ring(dev);
1079 if (rc < 0)
1080 goto err_free_rx_1;
1082 sis190_request_timer(dev);
1084 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1085 if (rc < 0)
1086 goto err_release_timer_2;
1088 sis190_hw_start(dev);
1089 out:
1090 return rc;
1092 err_release_timer_2:
1093 sis190_delete_timer(dev);
1094 sis190_rx_clear(tp);
1095 err_free_rx_1:
1096 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1097 tp->rx_dma);
1098 err_free_tx_0:
1099 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1100 tp->tx_dma);
1101 goto out;
1104 static void sis190_tx_clear(struct sis190_private *tp)
1106 unsigned int i;
1108 for (i = 0; i < NUM_TX_DESC; i++) {
1109 struct sk_buff *skb = tp->Tx_skbuff[i];
1111 if (!skb)
1112 continue;
1114 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1115 tp->Tx_skbuff[i] = NULL;
1116 dev_kfree_skb(skb);
1118 tp->dev->stats.tx_dropped++;
1120 tp->cur_tx = tp->dirty_tx = 0;
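/*
 * Stop the adapter: kill the PHY timer, stop the queue, halt DMA and mask
 * interrupts, looping until IntrMask reads back as zero (presumably to guard
 * against a concurrent interrupt path re-enabling it).
 */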
1123 static void sis190_down(struct net_device *dev)
1125 struct sis190_private *tp = netdev_priv(dev);
1126 void __iomem *ioaddr = tp->mmio_addr;
1127 unsigned int poll_locked = 0;
1129 sis190_delete_timer(dev);
1131 netif_stop_queue(dev);
1133 do {
1134 spin_lock_irq(&tp->lock);
1136 sis190_asic_down(ioaddr);
1138 spin_unlock_irq(&tp->lock);
1140 synchronize_irq(dev->irq);
1142 if (!poll_locked)
1143 poll_locked++;
1145 synchronize_sched();
1147 } while (SIS_R32(IntrMask));
1149 sis190_tx_clear(tp);
1150 sis190_rx_clear(tp);
1153 static int sis190_close(struct net_device *dev)
1155 struct sis190_private *tp = netdev_priv(dev);
1156 struct pci_dev *pdev = tp->pci_dev;
1158 sis190_down(dev);
1160 free_irq(dev->irq, dev);
1162 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1163 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1165 tp->TxDescRing = NULL;
1166 tp->RxDescRing = NULL;
1168 return 0;
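/*
 * Queue one frame: pad short packets to ETH_ZLEN, fill the next Tx
 * descriptor, hand it to the chip (OWNbit) and kick TxControl; the queue is
 * stopped when the ring becomes full.
 */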
1171 static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
1172 struct net_device *dev)
1174 struct sis190_private *tp = netdev_priv(dev);
1175 void __iomem *ioaddr = tp->mmio_addr;
1176 u32 len, entry, dirty_tx;
1177 struct TxDesc *desc;
1178 dma_addr_t mapping;
1180 if (unlikely(skb->len < ETH_ZLEN)) {
1181 if (skb_padto(skb, ETH_ZLEN)) {
1182 dev->stats.tx_dropped++;
1183 goto out;
1185 len = ETH_ZLEN;
1186 } else {
1187 len = skb->len;
1190 entry = tp->cur_tx % NUM_TX_DESC;
1191 desc = tp->TxDescRing + entry;
1193 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1194 netif_stop_queue(dev);
1195 net_tx_err(tp, KERN_ERR PFX
1196 "%s: BUG! Tx Ring full when queue awake!\n",
1197 dev->name);
1198 return NETDEV_TX_BUSY;
1201 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1203 tp->Tx_skbuff[entry] = skb;
1205 desc->PSize = cpu_to_le32(len);
1206 desc->addr = cpu_to_le32(mapping);
1208 desc->size = cpu_to_le32(len);
1209 if (entry == (NUM_TX_DESC - 1))
1210 desc->size |= cpu_to_le32(RingEnd);
1212 wmb();
1214 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1216 tp->cur_tx++;
1218 smp_wmb();
1220 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1222 dirty_tx = tp->dirty_tx;
1223 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1224 netif_stop_queue(dev);
1225 smp_rmb();
1226 if (dirty_tx != tp->dirty_tx)
1227 netif_wake_queue(dev);
1229 out:
1230 return NETDEV_TX_OK;
1233 static void sis190_free_phy(struct list_head *first_phy)
1235 struct sis190_phy *cur, *next;
1237 list_for_each_entry_safe(cur, next, first_phy, list) {
1238 kfree(cur);
1242 /**
1243  * sis190_default_phy - Select default PHY for sis190 mac.
1244  * @dev: the net device to probe for
1245  *
1246  * Select the first detected PHY with link as the default.
1247  * If none has link, select the PHY whose type is HOME as the default.
1248  * If no HOME PHY exists, select LAN.
1249  */
1250 static u16 sis190_default_phy(struct net_device *dev)
1252 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1253 struct sis190_private *tp = netdev_priv(dev);
1254 struct mii_if_info *mii_if = &tp->mii_if;
1255 void __iomem *ioaddr = tp->mmio_addr;
1256 u16 status;
1258 phy_home = phy_default = phy_lan = NULL;
1260 list_for_each_entry(phy, &tp->first_phy, list) {
1261 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1263 // Link ON & Not select default PHY & not ghost PHY.
1264 if ((status & BMSR_LSTATUS) &&
1265 !phy_default &&
1266 (phy->type != UNKNOWN)) {
1267 phy_default = phy;
1268 } else {
1269 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1270 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1271 status | BMCR_ANENABLE | BMCR_ISOLATE);
1272 if (phy->type == HOME)
1273 phy_home = phy;
1274 else if (phy->type == LAN)
1275 phy_lan = phy;
1279 if (!phy_default) {
1280 if (phy_home)
1281 phy_default = phy_home;
1282 else if (phy_lan)
1283 phy_default = phy_lan;
1284 else
1285 phy_default = list_first_entry(&tp->first_phy,
1286 struct sis190_phy, list);
1289 if (mii_if->phy_id != phy_default->phy_id) {
1290 mii_if->phy_id = phy_default->phy_id;
1291 net_probe(tp, KERN_INFO
1292 "%s: Using transceiver at address %d as default.\n",
1293 pci_name(tp->pci_dev), mii_if->phy_id);
1296 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1297 status &= (~BMCR_ISOLATE);
1299 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1300 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1302 return status;
1305 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1306 struct sis190_phy *phy, unsigned int phy_id,
1307 u16 mii_status)
1309 void __iomem *ioaddr = tp->mmio_addr;
1310 struct mii_chip_info *p;
1312 INIT_LIST_HEAD(&phy->list);
1313 phy->status = mii_status;
1314 phy->phy_id = phy_id;
1316 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1317 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1319 for (p = mii_chip_table; p->type; p++) {
1320 if ((p->id[0] == phy->id[0]) &&
1321 (p->id[1] == (phy->id[1] & 0xfff0))) {
1322 break;
1326 if (p->id[1]) {
1327 phy->type = (p->type == MIX) ?
1328 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1329 LAN : HOME) : p->type;
1330 tp->features |= p->feature;
1331 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1332 pci_name(tp->pci_dev), p->name, phy_id);
1333 } else {
1334 phy->type = UNKNOWN;
1335 net_probe(tp, KERN_INFO
1336 "%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1337 pci_name(tp->pci_dev),
1338 phy->id[0], (phy->id[1] & 0xfff0), phy_id);
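/*
 * Marvell 88E1111 fixup: extra writes to PHY registers 0x1b and 0x14,
 * apparently selecting the proper RGMII/GMII interface mode.
 */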
1342 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1344 if (tp->features & F_PHY_88E1111) {
1345 void __iomem *ioaddr = tp->mmio_addr;
1346 int phy_id = tp->mii_if.phy_id;
1347 u16 reg[2][2] = {
1348 { 0x808b, 0x0ce1 },
1349 { 0x808f, 0x0c60 }
1350 }, *p;
1352 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1354 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1355 udelay(200);
1356 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1357 udelay(200);
1361 /**
1362  * sis190_mii_probe - Probe MII PHY for sis190
1363  * @dev: the net device to probe for
1364  *
1365  * Search all 32 possible MII PHY addresses.
1366  * Identify and set the current PHY if one is found;
1367  * return an error if none is found.
1368  */
1369 static int __devinit sis190_mii_probe(struct net_device *dev)
1371 struct sis190_private *tp = netdev_priv(dev);
1372 struct mii_if_info *mii_if = &tp->mii_if;
1373 void __iomem *ioaddr = tp->mmio_addr;
1374 int phy_id;
1375 int rc = 0;
1377 INIT_LIST_HEAD(&tp->first_phy);
1379 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1380 struct sis190_phy *phy;
1381 u16 status;
1383 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1385 // Try next mii if the current one is not accessible.
1386 if (status == 0xffff || status == 0x0000)
1387 continue;
1389 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1390 if (!phy) {
1391 sis190_free_phy(&tp->first_phy);
1392 rc = -ENOMEM;
1393 goto out;
1396 sis190_init_phy(dev, tp, phy, phy_id, status);
1398 list_add(&tp->first_phy, &phy->list);
1401 if (list_empty(&tp->first_phy)) {
1402 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1403 pci_name(tp->pci_dev));
1404 rc = -EIO;
1405 goto out;
1408 /* Select default PHY for mac */
1409 sis190_default_phy(dev);
1411 sis190_mii_probe_88e1111_fixup(tp);
1413 mii_if->dev = dev;
1414 mii_if->mdio_read = __mdio_read;
1415 mii_if->mdio_write = __mdio_write;
1416 mii_if->phy_id_mask = PHY_ID_ANY;
1417 mii_if->reg_num_mask = MII_REG_ANY;
1418 out:
1419 return rc;
1422 static void sis190_mii_remove(struct net_device *dev)
1424 struct sis190_private *tp = netdev_priv(dev);
1426 sis190_free_phy(&tp->first_phy);
1429 static void sis190_release_board(struct pci_dev *pdev)
1431 struct net_device *dev = pci_get_drvdata(pdev);
1432 struct sis190_private *tp = netdev_priv(dev);
1434 iounmap(tp->mmio_addr);
1435 pci_release_regions(pdev);
1436 pci_disable_device(pdev);
1437 free_netdev(dev);
1440 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1442 struct sis190_private *tp;
1443 struct net_device *dev;
1444 void __iomem *ioaddr;
1445 int rc;
1447 dev = alloc_etherdev(sizeof(*tp));
1448 if (!dev) {
1449 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1450 rc = -ENOMEM;
1451 goto err_out_0;
1454 SET_NETDEV_DEV(dev, &pdev->dev);
1456 tp = netdev_priv(dev);
1457 tp->dev = dev;
1458 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1460 rc = pci_enable_device(pdev);
1461 if (rc < 0) {
1462 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1463 goto err_free_dev_1;
1466 rc = -ENODEV;
1468 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1469 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1470 pci_name(pdev));
1471 goto err_pci_disable_2;
1473 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1474 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1475 pci_name(pdev));
1476 goto err_pci_disable_2;
1479 rc = pci_request_regions(pdev, DRV_NAME);
1480 if (rc < 0) {
1481 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1482 pci_name(pdev));
1483 goto err_pci_disable_2;
1486 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1487 if (rc < 0) {
1488 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1489 pci_name(pdev));
1490 goto err_free_res_3;
1493 pci_set_master(pdev);
1495 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1496 if (!ioaddr) {
1497 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1498 pci_name(pdev));
1499 rc = -EIO;
1500 goto err_free_res_3;
1503 tp->pci_dev = pdev;
1504 tp->mmio_addr = ioaddr;
1506 sis190_irq_mask_and_ack(ioaddr);
1508 sis190_soft_reset(ioaddr);
1509 out:
1510 return dev;
1512 err_free_res_3:
1513 pci_release_regions(pdev);
1514 err_pci_disable_2:
1515 pci_disable_device(pdev);
1516 err_free_dev_1:
1517 free_netdev(dev);
1518 err_out_0:
1519 dev = ERR_PTR(rc);
1520 goto out;
1523 static void sis190_tx_timeout(struct net_device *dev)
1525 struct sis190_private *tp = netdev_priv(dev);
1526 void __iomem *ioaddr = tp->mmio_addr;
1527 u8 tmp8;
1529 /* Disable Tx, if not already */
1530 tmp8 = SIS_R8(TxControl);
1531 if (tmp8 & CmdTxEnb)
1532 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1535 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1536 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1538 /* Disable interrupts by clearing the interrupt mask. */
1539 SIS_W32(IntrMask, 0x0000);
1541 /* Stop a shared interrupt from scavenging while we are. */
1542 spin_lock_irq(&tp->lock);
1543 sis190_tx_clear(tp);
1544 spin_unlock_irq(&tp->lock);
1546 /* ...and finally, reset everything. */
1547 sis190_hw_start(dev);
1549 netif_wake_queue(dev);
1552 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1554 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1557 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1558 struct net_device *dev)
1560 struct sis190_private *tp = netdev_priv(dev);
1561 void __iomem *ioaddr = tp->mmio_addr;
1562 u16 sig;
1563 int i;
1565 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1566 pci_name(pdev));
1568 /* Check to see if there is a sane EEPROM */
1569 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1571 if ((sig == 0xffff) || (sig == 0x0000)) {
1572 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1573 pci_name(pdev), sig);
1574 return -EIO;
1577 /* Get MAC address from EEPROM */
1578 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1579 u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1581 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1584 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1586 return 0;
1589 /**
1590  * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
1591  * @pdev: PCI device
1592  * @dev: network device to get address for
1593  *
1594  * SiS96x models store the MAC address in APC CMOS RAM, which is
1595  * accessed through the ISA bridge.
1596  * The MAC address is read into @dev->dev_addr.
1597  */
1598 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1599 struct net_device *dev)
1601 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1602 struct sis190_private *tp = netdev_priv(dev);
1603 struct pci_dev *isa_bridge;
1604 u8 reg, tmp8;
1605 unsigned int i;
1607 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1608 pci_name(pdev));
1610 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1611 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1612 if (isa_bridge)
1613 break;
1616 if (!isa_bridge) {
1617 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1618 pci_name(pdev));
1619 return -EIO;
1622 /* Enable port 78h & 79h to access APC Registers. */
1623 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1624 reg = (tmp8 & ~0x02);
1625 pci_write_config_byte(isa_bridge, 0x48, reg);
1626 udelay(50);
1627 pci_read_config_byte(isa_bridge, 0x48, &reg);
1629 for (i = 0; i < MAC_ADDR_LEN; i++) {
1630 outb(0x9 + i, 0x78);
1631 dev->dev_addr[i] = inb(0x79);
1634 outb(0x12, 0x78);
1635 reg = inb(0x79);
1637 sis190_set_rgmii(tp, reg);
1639 /* Restore the value to ISA Bridge */
1640 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1641 pci_dev_put(isa_bridge);
1643 return 0;
1646 /**
1647  * sis190_init_rxfilter - Initialize the Rx filter
1648  * @dev: network device to initialize
1649  *
1650  * Set receive filter address to our MAC address
1651  * and enable packet filtering.
1652  */
1653 static inline void sis190_init_rxfilter(struct net_device *dev)
1655 struct sis190_private *tp = netdev_priv(dev);
1656 void __iomem *ioaddr = tp->mmio_addr;
1657 u16 ctl;
1658 int i;
1660 ctl = SIS_R16(RxMacControl);
1661 /*
1662  * Disable packet filtering before setting filter.
1663  * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1664  * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1665  */
1666 SIS_W16(RxMacControl, ctl & ~0x0f00);
1668 for (i = 0; i < MAC_ADDR_LEN; i++)
1669 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1671 SIS_W16(RxMacControl, ctl);
1672 SIS_PCI_COMMIT();
1675 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1676 struct net_device *dev)
1678 int rc;
1680 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1681 if (rc < 0) {
1682 u8 reg;
1684 pci_read_config_byte(pdev, 0x73, &reg);
1686 if (reg & 0x00000001)
1687 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1689 return rc;
1692 static void sis190_set_speed_auto(struct net_device *dev)
1694 struct sis190_private *tp = netdev_priv(dev);
1695 void __iomem *ioaddr = tp->mmio_addr;
1696 int phy_id = tp->mii_if.phy_id;
1697 int val;
1699 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1701 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1703 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1704 // unchanged.
1705 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1706 ADVERTISE_100FULL | ADVERTISE_10FULL |
1707 ADVERTISE_100HALF | ADVERTISE_10HALF);
1709 // Enable 1000 Full Mode.
1710 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1712 // Enable auto-negotiation and restart auto-negotiation.
1713 mdio_write(ioaddr, phy_id, MII_BMCR,
1714 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1717 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1719 struct sis190_private *tp = netdev_priv(dev);
1721 return mii_ethtool_gset(&tp->mii_if, cmd);
1724 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1726 struct sis190_private *tp = netdev_priv(dev);
1728 return mii_ethtool_sset(&tp->mii_if, cmd);
1731 static void sis190_get_drvinfo(struct net_device *dev,
1732 struct ethtool_drvinfo *info)
1734 struct sis190_private *tp = netdev_priv(dev);
1736 strcpy(info->driver, DRV_NAME);
1737 strcpy(info->version, DRV_VERSION);
1738 strcpy(info->bus_info, pci_name(tp->pci_dev));
1741 static int sis190_get_regs_len(struct net_device *dev)
1743 return SIS190_REGS_SIZE;
1746 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1747 void *p)
1749 struct sis190_private *tp = netdev_priv(dev);
1750 unsigned long flags;
1752 if (regs->len > SIS190_REGS_SIZE)
1753 regs->len = SIS190_REGS_SIZE;
1755 spin_lock_irqsave(&tp->lock, flags);
1756 memcpy_fromio(p, tp->mmio_addr, regs->len);
1757 spin_unlock_irqrestore(&tp->lock, flags);
1760 static int sis190_nway_reset(struct net_device *dev)
1762 struct sis190_private *tp = netdev_priv(dev);
1764 return mii_nway_restart(&tp->mii_if);
1767 static u32 sis190_get_msglevel(struct net_device *dev)
1769 struct sis190_private *tp = netdev_priv(dev);
1771 return tp->msg_enable;
1774 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1776 struct sis190_private *tp = netdev_priv(dev);
1778 tp->msg_enable = value;
1781 static const struct ethtool_ops sis190_ethtool_ops = {
1782 .get_settings = sis190_get_settings,
1783 .set_settings = sis190_set_settings,
1784 .get_drvinfo = sis190_get_drvinfo,
1785 .get_regs_len = sis190_get_regs_len,
1786 .get_regs = sis190_get_regs,
1787 .get_link = ethtool_op_get_link,
1788 .get_msglevel = sis190_get_msglevel,
1789 .set_msglevel = sis190_set_msglevel,
1790 .nway_reset = sis190_nway_reset,
1791 };
1793 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1795 struct sis190_private *tp = netdev_priv(dev);
1797 return !netif_running(dev) ? -EINVAL :
1798 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1801 static const struct net_device_ops sis190_netdev_ops = {
1802 .ndo_open = sis190_open,
1803 .ndo_stop = sis190_close,
1804 .ndo_do_ioctl = sis190_ioctl,
1805 .ndo_start_xmit = sis190_start_xmit,
1806 .ndo_tx_timeout = sis190_tx_timeout,
1807 .ndo_set_multicast_list = sis190_set_rx_mode,
1808 .ndo_change_mtu = eth_change_mtu,
1809 .ndo_set_mac_address = eth_mac_addr,
1810 .ndo_validate_addr = eth_validate_addr,
1811 #ifdef CONFIG_NET_POLL_CONTROLLER
1812 .ndo_poll_controller = sis190_netpoll,
1813 #endif
1814 };
1816 static int __devinit sis190_init_one(struct pci_dev *pdev,
1817 const struct pci_device_id *ent)
1819 static int printed_version = 0;
1820 struct sis190_private *tp;
1821 struct net_device *dev;
1822 void __iomem *ioaddr;
1823 int rc;
1825 if (!printed_version) {
1826 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1827 printed_version = 1;
1830 dev = sis190_init_board(pdev);
1831 if (IS_ERR(dev)) {
1832 rc = PTR_ERR(dev);
1833 goto out;
1836 pci_set_drvdata(pdev, dev);
1838 tp = netdev_priv(dev);
1839 ioaddr = tp->mmio_addr;
1841 rc = sis190_get_mac_addr(pdev, dev);
1842 if (rc < 0)
1843 goto err_release_board;
1845 sis190_init_rxfilter(dev);
1847 INIT_WORK(&tp->phy_task, sis190_phy_task);
1849 dev->netdev_ops = &sis190_netdev_ops;
1851 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1852 dev->irq = pdev->irq;
1853 dev->base_addr = (unsigned long) 0xdead;
1854 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1856 spin_lock_init(&tp->lock);
1858 rc = sis190_mii_probe(dev);
1859 if (rc < 0)
1860 goto err_release_board;
1862 rc = register_netdev(dev);
1863 if (rc < 0)
1864 goto err_remove_mii;
1866 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), %pM\n",
1867 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1868 ioaddr, dev->irq, dev->dev_addr);
1870 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1871 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1873 netif_carrier_off(dev);
1875 sis190_set_speed_auto(dev);
1876 out:
1877 return rc;
1879 err_remove_mii:
1880 sis190_mii_remove(dev);
1881 err_release_board:
1882 sis190_release_board(pdev);
1883 goto out;
1886 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1888 struct net_device *dev = pci_get_drvdata(pdev);
1890 sis190_mii_remove(dev);
1891 flush_scheduled_work();
1892 unregister_netdev(dev);
1893 sis190_release_board(pdev);
1894 pci_set_drvdata(pdev, NULL);
1897 static struct pci_driver sis190_pci_driver = {
1898 .name = DRV_NAME,
1899 .id_table = sis190_pci_tbl,
1900 .probe = sis190_init_one,
1901 .remove = __devexit_p(sis190_remove_one),
1902 };
1904 static int __init sis190_init_module(void)
1906 return pci_register_driver(&sis190_pci_driver);
1909 static void __exit sis190_cleanup_module(void)
1911 pci_unregister_driver(&sis190_pci_driver);
1914 module_init(sis190_init_module);
1915 module_exit(sis190_cleanup_module);