/*
   sis190.c: Silicon Integrated Systems SiS190 ethernet driver

   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>

   Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
   genuine driver.

   This software may be used and distributed according to the terms of
   the GNU General Public License (GPL), incorporated herein by reference.
   Drivers based on or derived from this code fall under the GPL and must
   retain the authorship, copyright and license notice.  This file is not
   a complete program and may only be used when the entire operating
   system is licensed under the GPL.

   See the file COPYING in this distribution for more information.

 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <asm/irq.h>
#define PHY_MAX_ADDR            32
#define PHY_ID_ANY              0x1f
#define MII_REG_ANY             0x1f

#define DRV_VERSION             "1.4"
#define DRV_NAME                "sis190"
#define SIS190_DRIVER_NAME      DRV_NAME " Gigabit Ethernet driver " DRV_VERSION

#define sis190_rx_skb                   netif_rx
#define sis190_rx_quota(count, quota)   count

#define NUM_TX_DESC             64      /* [8..1024] */
#define NUM_RX_DESC             64      /* [8..8192] */
#define TX_RING_BYTES           (NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES           (NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE             1536
#define RX_BUF_MASK             0xfff8

#define SIS190_REGS_SIZE        0x80
#define SIS190_TX_TIMEOUT       (6*HZ)
#define SIS190_PHY_TIMEOUT      (10*HZ)
#define SIS190_MSG_DEFAULT      (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
                                 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
                                 NETIF_MSG_IFDOWN)
/* Enhanced PHY access register bit definitions */
#define EhnMIIread              0x0000
#define EhnMIIwrite             0x0020
#define EhnMIIdataShift         16
#define EhnMIIpmdShift          6       /* 7016 only */
#define EhnMIIregShift          11
#define EhnMIIreq               0x0010
#define EhnMIInotDone           0x0010
/* Write/read MMIO register */
#define SIS_W8(reg, val)        writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)       writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)       writel ((val), ioaddr + (reg))
#define SIS_R8(reg)             readb (ioaddr + (reg))
#define SIS_R16(reg)            readw (ioaddr + (reg))
#define SIS_R32(reg)            readl (ioaddr + (reg))

#define SIS_PCI_COMMIT()        SIS_R32(IntrControl)
enum sis190_registers {
        TxControl               = 0x00,
        TxDescStartAddr         = 0x04,
        rsv0                    = 0x08, // reserved
        TxSts                   = 0x0c, // unused (Control/Status)
        RxControl               = 0x10,
        RxDescStartAddr         = 0x14,
        rsv1                    = 0x18, // reserved
        RxSts                   = 0x1c, // unused
        IntrStatus              = 0x20,
        IntrMask                = 0x24,
        IntrControl             = 0x28,
        IntrTimer               = 0x2c, // unused (Interrupt Timer)
        PMControl               = 0x30, // unused (Power Mgmt Control/Status)
        rsv2                    = 0x34, // reserved
        ROMControl              = 0x38,
        ROMInterface            = 0x3c,
        StationControl          = 0x40,
        GMIIControl             = 0x44,
        GIoCR                   = 0x48, // unused (GMAC IO Compensation)
        GIoCtrl                 = 0x4c, // unused (GMAC IO Control)
        TxMacControl            = 0x50,
        TxLimit                 = 0x54, // unused (Tx MAC Timer/TryLimit)
        RGDelay                 = 0x58, // unused (RGMII Tx Internal Delay)
        rsv3                    = 0x5c, // reserved
        RxMacControl            = 0x60,
        RxMacAddr               = 0x62,
        RxHashTable             = 0x68,
        // Undocumented         = 0x6c,
        RxWolCtrl               = 0x70,
        RxWolData               = 0x74, // unused (Rx WOL Data Access)
        RxMPSControl            = 0x78, // unused (Rx MPS Control)
        rsv4                    = 0x7c, // reserved
};
enum sis190_register_content {
        /* IntrStatus */
        SoftInt                 = 0x40000000,   // unused
        Timeup                  = 0x20000000,   // unused
        PauseFrame              = 0x00080000,   // unused
        MagicPacket             = 0x00040000,   // unused
        WakeupFrame             = 0x00020000,   // unused
        LinkChange              = 0x00010000,
        RxQEmpty                = 0x00000080,
        RxQInt                  = 0x00000040,
        TxQ1Empty               = 0x00000020,   // unused
        TxQ1Int                 = 0x00000010,
        TxQ0Empty               = 0x00000008,   // unused
        TxQ0Int                 = 0x00000004,
        RxHalt                  = 0x00000002,
        TxHalt                  = 0x00000001,

        /* {Rx/Tx}CmdBits */
        CmdReset                = 0x10,
        CmdRxEnb                = 0x08,         // unused
        CmdTxEnb                = 0x01,
        RxBufEmpty              = 0x01,         // unused

        /* Cfg9346Bits */
        Cfg9346_Lock            = 0x00,         // unused
        Cfg9346_Unlock          = 0xc0,         // unused

        /* RxMacControl */
        AcceptErr               = 0x20,         // unused
        AcceptRunt              = 0x10,         // unused
        AcceptBroadcast         = 0x0800,
        AcceptMulticast         = 0x0400,
        AcceptMyPhys            = 0x0200,
        AcceptAllPhys           = 0x0100,

        /* RxConfigBits */
        RxCfgFIFOShift          = 13,
        RxCfgDMAShift           = 8,            // 0x1a in RxControl ?

        /* TxConfigBits */
        TxInterFrameGapShift    = 24,
        TxDMAShift              = 8,    /* DMA burst value (0-7) is shifted this many bits */

        LinkStatus              = 0x02,         // unused
        FullDup                 = 0x01,         // unused

        /* TBICSRBit */
        TBILinkOK               = 0x02000000,   // unused
};
struct TxDesc {
        __le32 PSize;
        __le32 status;
        __le32 addr;
        __le32 size;
};

struct RxDesc {
        __le32 PSize;
        __le32 status;
        __le32 addr;
        __le32 size;
};
enum _DescStatusBit {
        /* _Desc.status */
        OWNbit          = 0x80000000, // RXOWN/TXOWN
        INTbit          = 0x40000000, // RXINT/TXINT
        CRCbit          = 0x00020000, // CRCOFF/CRCEN
        PADbit          = 0x00010000, // PREADD/PADEN
        /* _Desc.size */
        RingEnd         = 0x80000000,
        /* TxDesc.status */
        LSEN            = 0x08000000, // TSO ? -- FR
        IPCS            = 0x04000000,
        TCPCS           = 0x02000000,
        UDPCS           = 0x01000000,
        BSTEN           = 0x00800000,
        EXTEN           = 0x00400000,
        DEFEN           = 0x00200000,
        BKFEN           = 0x00100000,
        CRSEN           = 0x00080000,
        COLEN           = 0x00040000,
        THOL3           = 0x30000000,
        THOL2           = 0x20000000,
        THOL1           = 0x10000000,
        THOL0           = 0x00000000,

        WND             = 0x00080000,
        TABRT           = 0x00040000,
        FIFO            = 0x00020000,
        LINK            = 0x00010000,
        ColCountMask    = 0x0000ffff,
        /* RxDesc.status */
        IPON            = 0x20000000,
        TCPON           = 0x10000000,
        UDPON           = 0x08000000,
        Wakup           = 0x00400000,
        Magic           = 0x00200000,
        Pause           = 0x00100000,
        DEFbit          = 0x00200000,
        BCAST           = 0x000c0000,
        MCAST           = 0x00080000,
        UCAST           = 0x00040000,
        /* RxDesc.PSize */
        TAGON           = 0x80000000,
        RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
        ABORT           = 0x00800000,
        SHORT           = 0x00400000,
        LIMIT           = 0x00200000,
        MIIER           = 0x00100000,
        OVRUN           = 0x00080000,
        NIBON           = 0x00040000,
        COLON           = 0x00020000,
        CRCOK           = 0x00010000,
        RxSizeMask      = 0x0000ffff
        /*
         * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
         * provide two (unused with Linux) Tx queues. No publicly
         * available documentation alas.
         */
};
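
/*
 * As far as the code below suggests, descriptor ownership follows the usual
 * handshake: the driver fills ->addr and ->size, then sets OWNbit (plus
 * INTbit to request an interrupt) in ->status to hand the descriptor to the
 * chip; the chip clears OWNbit once it is done with it. RingEnd in ->size
 * marks the last descriptor so the chip wraps back to the start of the ring.
 */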
enum sis190_eeprom_access_register_bits {
        EECS    = 0x00000001,   // unused
        EECLK   = 0x00000002,   // unused
        EEDO    = 0x00000008,   // unused
        EEDI    = 0x00000004,   // unused
        EEREQ   = 0x00000080,
        EEROP   = 0x00000200,
        EEWOP   = 0x00000100    // unused
};
/* EEPROM Addresses */
enum sis190_eeprom_address {
        EEPROMSignature = 0x00,
        EEPROMCLK       = 0x01, // unused
        EEPROMInfo      = 0x02,
        EEPROMMACAddr   = 0x03
};
enum sis190_feature {
        F_HAS_RGMII     = 1,
        F_PHY_88E1111   = 2,
        F_PHY_BCM5461   = 4
};
struct sis190_private {
        void __iomem *mmio_addr;
        struct pci_dev *pci_dev;
        struct net_device *dev;
        spinlock_t lock;
        u32 rx_buf_sz;
        u32 cur_rx;
        u32 cur_tx;
        u32 dirty_rx;
        u32 dirty_tx;
        dma_addr_t rx_dma;
        dma_addr_t tx_dma;
        struct RxDesc *RxDescRing;
        struct TxDesc *TxDescRing;
        struct sk_buff *Rx_skbuff[NUM_RX_DESC];
        struct sk_buff *Tx_skbuff[NUM_TX_DESC];
        struct work_struct phy_task;
        struct timer_list timer;
        u32 msg_enable;
        struct mii_if_info mii_if;
        struct list_head first_phy;
        u32 features;
        u32 negotiated_lpa;
        enum {
                LNK_OFF,
                LNK_ON,
                LNK_AUTONEG,
        } link_status;
};
struct sis190_phy {
        struct list_head list;
        int phy_id;
        u16 id[2];
        u16 status;
        u8  type;
};
enum sis190_phy_type {
        UNKNOWN = 0x00,
        HOME    = 0x01,
        LAN     = 0x02,
        MIX     = 0x03
};
static struct mii_chip_info {
        const char *name;
        u16 id[2];
        unsigned int type;
        u32 feature;
} mii_chip_table[] = {
        { "Atheros PHY",          { 0x004d, 0xd010 }, LAN, 0 },
        { "Atheros PHY AR8012",   { 0x004d, 0xd020 }, LAN, 0 },
        { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
        { "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
        { "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
        { "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
        { "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
        { NULL, }
};
static const struct {
        const char *name;
} sis_chip_info[] = {
        { "SiS 190 PCI Fast Ethernet adapter" },
        { "SiS 191 PCI Gigabit Ethernet adapter" },
};
static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
        { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
        { 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
static int rx_copybreak = 200;

static struct {
        u32 msg_enable;
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
static const u32 sis190_intr_mask =
        RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static const int multicast_filter_limit = 32;
static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
        unsigned int i;

        SIS_W32(GMIIControl, ctl);

        msleep(1);

        for (i = 0; i < 100; i++) {
                if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
                        break;
                msleep(1);
        }

        if (i > 99)
                pr_err("PHY command failed !\n");
}
static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
{
        __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
                (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
                (((u32) val) << EhnMIIdataShift));
}

static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
{
        __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
                (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));

        return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
}
static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
{
        struct sis190_private *tp = netdev_priv(dev);

        mdio_write(tp->mmio_addr, phy_id, reg, val);
}

static int __mdio_read(struct net_device *dev, int phy_id, int reg)
{
        struct sis190_private *tp = netdev_priv(dev);

        return mdio_read(tp->mmio_addr, phy_id, reg);
}

static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
{
        mdio_read(ioaddr, phy_id, reg);
        return mdio_read(ioaddr, phy_id, reg);
}
static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
        u16 data = 0xffff;
        unsigned int i;

        if (!(SIS_R32(ROMControl) & 0x0002))
                return 0;

        SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

        for (i = 0; i < 200; i++) {
                if (!(SIS_R32(ROMInterface) & EEREQ)) {
                        data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
                        break;
                }
                msleep(1);
        }

        return data;
}
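
/*
 * EEPROM access as implemented above, for reference: a read of 16 bit word
 * @reg is requested by writing EEREQ | EEROP | (reg << 10) to ROMInterface,
 * then the chip is polled until it clears EEREQ and the result is taken from
 * the upper 16 bits of the same register. The early bailout on ROMControl
 * bit 0x0002 presumably means "no EEPROM attached".
 */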
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
        SIS_W32(IntrMask, 0x00);
        SIS_W32(IntrStatus, 0xffffffff);
        SIS_PCI_COMMIT();
}

static void sis190_asic_down(void __iomem *ioaddr)
{
        /* Stop the chip's Tx and Rx DMA processes. */

        SIS_W32(TxControl, 0x1a00);
        SIS_W32(RxControl, 0x1a00);

        sis190_irq_mask_and_ack(ioaddr);
}

static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
        desc->size |= cpu_to_le32(RingEnd);
}
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
        u32 eor = le32_to_cpu(desc->size) & RingEnd;

        desc->PSize = 0x0;
        desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
        wmb();
        desc->status = cpu_to_le32(OWNbit | INTbit);
}

static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
                                      u32 rx_buf_sz)
{
        desc->addr = cpu_to_le32(mapping);
        sis190_give_to_asic(desc, rx_buf_sz);
}

static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
        desc->PSize = 0x0;
        desc->addr = cpu_to_le32(0xdeadbeef);
        desc->size &= cpu_to_le32(RingEnd);
        wmb();
        desc->status = 0x0;
}
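
/*
 * Note the wmb() in the two helpers above: the descriptor address and size
 * updates must be visible before ->status is written (setting or clearing
 * OWNbit), otherwise the asic could conceivably start a DMA using stale
 * fields.
 */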
static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
                                           struct RxDesc *desc)
{
        u32 rx_buf_sz = tp->rx_buf_sz;
        struct sk_buff *skb;
        dma_addr_t mapping;

        skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
        if (unlikely(!skb))
                goto skb_alloc_failed;
        mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
                                 PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(tp->pci_dev, mapping))
                goto out;
        sis190_map_to_asic(desc, mapping, rx_buf_sz);

        return skb;

out:
        dev_kfree_skb_any(skb);
skb_alloc_failed:
        sis190_make_unusable_by_asic(desc);
        return NULL;
}
static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
                          u32 start, u32 end)
{
        u32 cur;

        for (cur = start; cur < end; cur++) {
                unsigned int i = cur % NUM_RX_DESC;

                if (tp->Rx_skbuff[i])
                        continue;

                tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);

                if (!tp->Rx_skbuff[i])
                        break;
        }
        return cur - start;
}
static bool sis190_try_rx_copy(struct sis190_private *tp,
                               struct sk_buff **sk_buff, int pkt_size,
                               dma_addr_t addr)
{
        struct sk_buff *skb;
        bool done = false;

        if (pkt_size >= rx_copybreak)
                goto out;

        skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
        if (!skb)
                goto out;

        pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
                                    PCI_DMA_FROMDEVICE);
        skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
        *sk_buff = skb;
        done = true;
out:
        return done;
}
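
/*
 * rx_copybreak policy, for reference: packets smaller than rx_copybreak
 * (200 bytes by default, see the module parameter) are copied into a freshly
 * allocated skb so that the original, full sized Rx buffer can be handed
 * straight back to the asic. Larger packets are passed up as-is and their
 * buffer is unmapped and replenished later by sis190_rx_fill().
 */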
static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)

        if ((status & CRCOK) && !(status & ErrMask))
                return 0;

        if (!(status & CRCOK))
                stats->rx_crc_errors++;
        else if (status & OVRUN)
                stats->rx_over_errors++;
        else if (status & (SHORT | LIMIT))
                stats->rx_length_errors++;
        else if (status & (MIIER | NIBON | COLON))
                stats->rx_frame_errors++;

        stats->rx_errors++;
        return -1;
}
static int sis190_rx_interrupt(struct net_device *dev,
                               struct sis190_private *tp, void __iomem *ioaddr)
{
        struct net_device_stats *stats = &dev->stats;
        u32 rx_left, cur_rx = tp->cur_rx;
        u32 delta, count;

        rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
        rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

        for (; rx_left > 0; rx_left--, cur_rx++) {
                unsigned int entry = cur_rx % NUM_RX_DESC;
                struct RxDesc *desc = tp->RxDescRing + entry;
                u32 status;

                if (le32_to_cpu(desc->status) & OWNbit)
                        break;

                status = le32_to_cpu(desc->PSize);

                //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);

                if (sis190_rx_pkt_err(status, stats) < 0)
                        sis190_give_to_asic(desc, tp->rx_buf_sz);
                else {
                        struct sk_buff *skb = tp->Rx_skbuff[entry];
                        dma_addr_t addr = le32_to_cpu(desc->addr);
                        int pkt_size = (status & RxSizeMask) - 4;
                        struct pci_dev *pdev = tp->pci_dev;

                        if (unlikely(pkt_size > tp->rx_buf_sz)) {
                                netif_info(tp, intr, dev,
                                           "(frag) status = %08x\n", status);
                                stats->rx_dropped++;
                                stats->rx_length_errors++;
                                sis190_give_to_asic(desc, tp->rx_buf_sz);
                                continue;
                        }

                        if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
                                pci_dma_sync_single_for_device(pdev, addr,
                                        tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                                sis190_give_to_asic(desc, tp->rx_buf_sz);
                        } else {
                                pci_unmap_single(pdev, addr, tp->rx_buf_sz,
                                                 PCI_DMA_FROMDEVICE);
                                tp->Rx_skbuff[entry] = NULL;
                                sis190_make_unusable_by_asic(desc);
                        }

                        skb_put(skb, pkt_size);
                        skb->protocol = eth_type_trans(skb, dev);

                        sis190_rx_skb(skb);

                        stats->rx_packets++;
                        stats->rx_bytes += pkt_size;
                        if ((status & BCAST) == MCAST)
                                stats->multicast++;
                }
        }
        count = cur_rx - tp->cur_rx;
        tp->cur_rx = cur_rx;

        delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
        if (!delta && count)
                netif_info(tp, intr, dev, "no Rx buffer allocated\n");
        tp->dirty_rx += delta;

        if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
                netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");

        return count;
}
static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
                                struct TxDesc *desc)
{
        unsigned int len;

        len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

        pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);

        memset(desc, 0x00, sizeof(*desc));
}

static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define TxErrMask (WND | TABRT | FIFO | LINK)

        if (!unlikely(status & TxErrMask))
                return 0;

        if (status & WND)
                stats->tx_window_errors++;
        if (status & TABRT)
                stats->tx_aborted_errors++;
        if (status & FIFO)
                stats->tx_fifo_errors++;
        if (status & LINK)
                stats->tx_carrier_errors++;

        stats->tx_errors++;

        return -1;
}
static void sis190_tx_interrupt(struct net_device *dev,
                                struct sis190_private *tp, void __iomem *ioaddr)
{
        struct net_device_stats *stats = &dev->stats;
        u32 pending, dirty_tx = tp->dirty_tx;
        /*
         * It would not be needed if queueing was allowed to be enabled
         * again too early (hint: think preempt and unclocked smp systems).
         */
        unsigned int queue_stopped;

        smp_rmb();
        pending = tp->cur_tx - dirty_tx;
        queue_stopped = (pending == NUM_TX_DESC);

        for (; pending; pending--, dirty_tx++) {
                unsigned int entry = dirty_tx % NUM_TX_DESC;
                struct TxDesc *txd = tp->TxDescRing + entry;
                u32 status = le32_to_cpu(txd->status);
                struct sk_buff *skb;

                if (status & OWNbit)
                        break;

                skb = tp->Tx_skbuff[entry];

                if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
                        stats->tx_packets++;
                        stats->tx_bytes += skb->len;
                        stats->collisions += ((status & ColCountMask) - 1);
                }

                sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
                tp->Tx_skbuff[entry] = NULL;
                dev_kfree_skb_irq(skb);
        }

        if (tp->dirty_tx != dirty_tx) {
                tp->dirty_tx = dirty_tx;
                smp_wmb();
                if (queue_stopped)
                        netif_wake_queue(dev);
        }
}
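
/*
 * queue_stopped is sampled before walking the ring on purpose: the queue is
 * only woken once tp->dirty_tx has been published (smp_wmb() above), which
 * appears to pair with the smp_rmb()/smp_wmb() in sis190_start_xmit() so
 * that a concurrent transmit either sees the freed slots or gets woken up.
 */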
/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev)
{
        struct net_device *dev = __dev;
        struct sis190_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        unsigned int handled = 0;
        u32 status;

        status = SIS_R32(IntrStatus);

        if ((status == 0xffffffff) || !status)
                goto out;

        handled = 1;

        if (unlikely(!netif_running(dev))) {
                sis190_asic_down(ioaddr);
                goto out;
        }

        SIS_W32(IntrStatus, status);

//      netif_info(tp, intr, dev, "status = %08x\n", status);

        if (status & LinkChange) {
                netif_info(tp, intr, dev, "link change\n");
                del_timer(&tp->timer);
                schedule_work(&tp->phy_task);
        }

        if (status & RxQInt)
                sis190_rx_interrupt(dev, tp, ioaddr);

        if (status & TxQ0Int)
                sis190_tx_interrupt(dev, tp, ioaddr);
out:
        return IRQ_RETVAL(handled);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void sis190_netpoll(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        struct pci_dev *pdev = tp->pci_dev;

        disable_irq(pdev->irq);
        sis190_interrupt(pdev->irq, dev);
        enable_irq(pdev->irq);
}
#endif
static void sis190_free_rx_skb(struct sis190_private *tp,
                               struct sk_buff **sk_buff, struct RxDesc *desc)
{
        struct pci_dev *pdev = tp->pci_dev;

        pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
                         PCI_DMA_FROMDEVICE);
        dev_kfree_skb(*sk_buff);
        *sk_buff = NULL;
        sis190_make_unusable_by_asic(desc);
}

static void sis190_rx_clear(struct sis190_private *tp)
{
        unsigned int i;

        for (i = 0; i < NUM_RX_DESC; i++) {
                if (!tp->Rx_skbuff[i])
                        continue;
                sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
        }
}
static void sis190_init_ring_indexes(struct sis190_private *tp)
{
        tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}

static int sis190_init_ring(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);

        sis190_init_ring_indexes(tp);

        memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
        memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

        if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
                goto err_rx_clear;

        sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

        return 0;

err_rx_clear:
        sis190_rx_clear(tp);
        return -ENOMEM;
}
static void sis190_set_rx_mode(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        unsigned long flags;
        u32 mc_filter[2];       /* Multicast hash filter */
        u16 rx_mode;

        if (dev->flags & IFF_PROMISC) {
                rx_mode =
                        AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                        AcceptAllPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter perfectly -- accept all multicasts. */
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else {
                struct netdev_hw_addr *ha;

                rx_mode = AcceptBroadcast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        int bit_nr =
                                ether_crc(ETH_ALEN, ha->addr) & 0x3f;
                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                        rx_mode |= AcceptMulticast;
                }
        }

        spin_lock_irqsave(&tp->lock, flags);

        SIS_W16(RxMacControl, rx_mode | 0x2);
        SIS_W32(RxHashTable, mc_filter[0]);
        SIS_W32(RxHashTable + 4, mc_filter[1]);

        spin_unlock_irqrestore(&tp->lock, flags);
}
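
/*
 * Multicast hash above, for reference: six bits of the Ethernet CRC of each
 * multicast address select one of the 64 hash bits,
 *
 *      bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3f;  // 0..63
 *      mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 *
 * and the two 32 bit halves end up in RxHashTable and RxHashTable + 4.
 */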
static void sis190_soft_reset(void __iomem *ioaddr)
{
        SIS_W32(IntrControl, 0x8000);
        SIS_PCI_COMMIT();
        SIS_W32(IntrControl, 0x0);
        sis190_asic_down(ioaddr);
}
static void sis190_hw_start(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;

        sis190_soft_reset(ioaddr);

        SIS_W32(TxDescStartAddr, tp->tx_dma);
        SIS_W32(RxDescStartAddr, tp->rx_dma);

        SIS_W32(IntrStatus, 0xffffffff);
        SIS_W32(IntrMask, 0x0);
        SIS_W32(GMIIControl, 0x0);
        SIS_W32(TxMacControl, 0x60);
        SIS_W16(RxMacControl, 0x02);
        SIS_W32(RxHashTable, 0x0);
        SIS_W32(0x6c, 0x0);
        SIS_W32(RxWolCtrl, 0x0);
        SIS_W32(RxWolData, 0x0);

        SIS_PCI_COMMIT();

        sis190_set_rx_mode(dev);

        /* Enable all known interrupts by setting the interrupt mask. */
        SIS_W32(IntrMask, sis190_intr_mask);

        SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
        SIS_W32(RxControl, 0x1a1d);

        netif_start_queue(dev);
}
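
/*
 * The deferred PHY work below re-reads the MII status and, once the link is
 * up, resolves the negotiated mode: gigabit is derived from
 * MII_CTRL1000/MII_STAT1000 when the partner supports next pages, otherwise
 * the best mode common to LPA and ADVERTISE is picked, and the matching
 * (undocumented) StationControl value from the reg31[] table is programmed.
 */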
static void sis190_phy_task(struct work_struct *work)
{
        struct sis190_private *tp =
                container_of(work, struct sis190_private, phy_task);
        struct net_device *dev = tp->dev;
        void __iomem *ioaddr = tp->mmio_addr;
        int phy_id = tp->mii_if.phy_id;
        u16 val;

        rtnl_lock();

        if (!netif_running(dev))
                goto out_unlock;

        val = mdio_read(ioaddr, phy_id, MII_BMCR);
        if (val & BMCR_RESET) {
                // FIXME: needlessly high ? -- FR 02/07/2005
                mod_timer(&tp->timer, jiffies + HZ/10);
                goto out_unlock;
        }

        val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
        if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
                netif_carrier_off(dev);
                netif_warn(tp, link, dev, "auto-negotiating...\n");
                tp->link_status = LNK_AUTONEG;
        } else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
                /* Rejoice ! */
                struct {
                        int val;
                        u32 ctl;
                        const char *msg;
                } reg31[] = {
                        { LPA_1000FULL, 0x07000c00 | 0x00001000,
                                "1000 Mbps Full Duplex" },
                        { LPA_1000HALF, 0x07000c00,
                                "1000 Mbps Half Duplex" },
                        { LPA_100FULL, 0x04000800 | 0x00001000,
                                "100 Mbps Full Duplex" },
                        { LPA_100HALF, 0x04000800,
                                "100 Mbps Half Duplex" },
                        { LPA_10FULL, 0x04000400 | 0x00001000,
                                "10 Mbps Full Duplex" },
                        { LPA_10HALF, 0x04000400,
                                "10 Mbps Half Duplex" },
                        { 0, 0x04000400, "unknown" }
                }, *p = NULL;
                u16 adv, autoexp, gigadv, gigrec;

                val = mdio_read(ioaddr, phy_id, 0x1f);
                netif_info(tp, link, dev, "mii ext = %04x\n", val);

                val = mdio_read(ioaddr, phy_id, MII_LPA);
                adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
                autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
                netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
                           val, adv, autoexp);

                if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
                        /* check for gigabit speed */
                        gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
                        gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
                        val = (gigadv & (gigrec >> 2));
                        if (val & ADVERTISE_1000FULL)
                                p = reg31;
                        else if (val & ADVERTISE_1000HALF)
                                p = reg31 + 1;
                }
                if (!p) {
                        val &= adv;

                        for (p = reg31; p->val; p++) {
                                if ((val & p->val) == p->val)
                                        break;
                        }
                }

                p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

                if ((tp->features & F_HAS_RGMII) &&
                    (tp->features & F_PHY_BCM5461)) {
                        // Set Tx Delay in RGMII mode.
                        mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
                        udelay(200);
                        mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
                        p->ctl |= 0x03000000;
                }

                SIS_W32(StationControl, p->ctl);

                if (tp->features & F_HAS_RGMII) {
                        SIS_W32(RGDelay, 0x0441);
                        SIS_W32(RGDelay, 0x0440);
                }

                tp->negotiated_lpa = p->val;

                netif_info(tp, link, dev, "link on %s mode\n", p->msg);
                netif_carrier_on(dev);
                tp->link_status = LNK_ON;
        } else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
                tp->link_status = LNK_OFF;
        mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);

out_unlock:
        rtnl_unlock();
}
static void sis190_phy_timer(unsigned long __opaque)
{
        struct net_device *dev = (struct net_device *)__opaque;
        struct sis190_private *tp = netdev_priv(dev);

        if (likely(netif_running(dev)))
                schedule_work(&tp->phy_task);
}

static inline void sis190_delete_timer(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);

        del_timer_sync(&tp->timer);
}

static inline void sis190_request_timer(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        struct timer_list *timer = &tp->timer;

        init_timer(timer);
        timer->expires = jiffies + SIS190_PHY_TIMEOUT;
        timer->data = (unsigned long)dev;
        timer->function = sis190_phy_timer;
        add_timer(timer);
}
static void sis190_set_rxbufsize(struct sis190_private *tp,
                                 struct net_device *dev)
{
        unsigned int mtu = dev->mtu;

        tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
        /* RxDesc->size has a licence to kill the lower bits */
        if (tp->rx_buf_sz & 0x07) {
                tp->rx_buf_sz += 8;
                tp->rx_buf_sz &= RX_BUF_MASK;
        }
}
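
/*
 * Example of the sizing above: with a standard 1500 byte mtu the buffer
 * stays at RX_BUF_SIZE (1536). A hypothetical 4000 byte mtu would give
 * 4000 + ETH_HLEN + 8 = 4022, which is not 8 byte aligned, so it becomes
 * (4022 + 8) & RX_BUF_MASK = 4024 -- the chip apparently ignores the low
 * three bits of RxDesc->size, hence the rounding.
 */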
static int sis190_open(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        struct pci_dev *pdev = tp->pci_dev;
        int rc = -ENOMEM;

        sis190_set_rxbufsize(tp, dev);

        /*
         * Rx and Tx descriptors need 256 bytes alignment.
         * pci_alloc_consistent() guarantees a stronger alignment.
         */
        tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
        if (!tp->TxDescRing)
                goto out;

        tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
        if (!tp->RxDescRing)
                goto err_free_tx_0;

        rc = sis190_init_ring(dev);
        if (rc < 0)
                goto err_free_rx_1;

        sis190_request_timer(dev);

        rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc < 0)
                goto err_release_timer_2;

        sis190_hw_start(dev);
out:
        return rc;

err_release_timer_2:
        sis190_delete_timer(dev);
        sis190_rx_clear(tp);
err_free_rx_1:
        pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
                tp->rx_dma);
err_free_tx_0:
        pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
                tp->tx_dma);
        goto out;
}
static void sis190_tx_clear(struct sis190_private *tp)
{
        unsigned int i;

        for (i = 0; i < NUM_TX_DESC; i++) {
                struct sk_buff *skb = tp->Tx_skbuff[i];

                if (!skb)
                        continue;

                sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
                tp->Tx_skbuff[i] = NULL;
                dev_kfree_skb(skb);

                tp->dev->stats.tx_dropped++;
        }
        tp->cur_tx = tp->dirty_tx = 0;
}
static void sis190_down(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        unsigned int poll_locked = 0;

        sis190_delete_timer(dev);

        netif_stop_queue(dev);

        do {
                spin_lock_irq(&tp->lock);

                sis190_asic_down(ioaddr);

                spin_unlock_irq(&tp->lock);

                synchronize_irq(dev->irq);

                if (!poll_locked)
                        poll_locked++;

                synchronize_sched();

        } while (SIS_R32(IntrMask));

        sis190_tx_clear(tp);
        sis190_rx_clear(tp);
}
static int sis190_close(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        struct pci_dev *pdev = tp->pci_dev;

        sis190_down(dev);

        free_irq(dev->irq, dev);

        pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
        pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

        tp->TxDescRing = NULL;
        tp->RxDescRing = NULL;

        return 0;
}
static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
                                     struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        u32 len, entry, dirty_tx;
        struct TxDesc *desc;
        dma_addr_t mapping;

        if (unlikely(skb->len < ETH_ZLEN)) {
                if (skb_padto(skb, ETH_ZLEN)) {
                        dev->stats.tx_dropped++;
                        goto out;
                }
                len = ETH_ZLEN;
        } else {
                len = skb->len;
        }

        entry = tp->cur_tx % NUM_TX_DESC;
        desc = tp->TxDescRing + entry;

        if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
                netif_stop_queue(dev);
                netif_err(tp, tx_err, dev,
                          "BUG! Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }

        mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
                netif_err(tp, tx_err, dev,
                                "PCI mapping failed, dropping packet");
                return NETDEV_TX_BUSY;
        }

        tp->Tx_skbuff[entry] = skb;

        desc->PSize = cpu_to_le32(len);
        desc->addr = cpu_to_le32(mapping);

        desc->size = cpu_to_le32(len);
        if (entry == (NUM_TX_DESC - 1))
                desc->size |= cpu_to_le32(RingEnd);

        wmb();

        desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
        if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
                /* Half Duplex */
                desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
                if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
                        desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
        }

        tp->cur_tx++;

        smp_wmb();

        SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

        dirty_tx = tp->dirty_tx;
        if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
                netif_stop_queue(dev);
                smp_rmb();
                if (dirty_tx != tp->dirty_tx)
                        netif_wake_queue(dev);
        }
out:
        return NETDEV_TX_OK;
}
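
/*
 * The tail of sis190_start_xmit() above uses the classic stop-then-recheck
 * pattern: if the ring looks full the queue is stopped, then dirty_tx is
 * re-read after smp_rmb(); if the completion path freed descriptors in the
 * meantime the queue is woken again right away, so a wakeup is not lost.
 */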
static void sis190_free_phy(struct list_head *first_phy)
{
        struct sis190_phy *cur, *next;

        list_for_each_entry_safe(cur, next, first_phy, list) {
                kfree(cur);
        }
}
/**
 *      sis190_default_phy - Select default PHY for sis190 mac.
 *      @dev: the net device to probe for
 *
 *      Select the first detected PHY with a link as the default.
 *      If none has a link, select a PHY of type HOME as the default.
 *      If no HOME PHY exists, select a LAN one.
 */
static u16 sis190_default_phy(struct net_device *dev)
{
        struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
        struct sis190_private *tp = netdev_priv(dev);
        struct mii_if_info *mii_if = &tp->mii_if;
        void __iomem *ioaddr = tp->mmio_addr;
        u16 status;

        phy_home = phy_default = phy_lan = NULL;

        list_for_each_entry(phy, &tp->first_phy, list) {
                status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

                // Link is up, no default PHY selected yet and not a ghost PHY.
                if ((status & BMSR_LSTATUS) &&
                    !phy_default &&
                    (phy->type != UNKNOWN)) {
                        phy_default = phy;
                } else {
                        status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
                        mdio_write(ioaddr, phy->phy_id, MII_BMCR,
                                   status | BMCR_ANENABLE | BMCR_ISOLATE);
                        if (phy->type == HOME)
                                phy_home = phy;
                        else if (phy->type == LAN)
                                phy_lan = phy;
                }
        }

        if (!phy_default) {
                if (phy_home)
                        phy_default = phy_home;
                else if (phy_lan)
                        phy_default = phy_lan;
                else
                        phy_default = list_first_entry(&tp->first_phy,
                                                 struct sis190_phy, list);
        }

        if (mii_if->phy_id != phy_default->phy_id) {
                mii_if->phy_id = phy_default->phy_id;
                if (netif_msg_probe(tp))
                        pr_info("%s: Using transceiver at address %d as default\n",
                                pci_name(tp->pci_dev), mii_if->phy_id);
        }

        status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
        status &= (~BMCR_ISOLATE);

        mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
        status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

        return status;
}
static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
                            struct sis190_phy *phy, unsigned int phy_id,
                            u16 mii_status)
{
        void __iomem *ioaddr = tp->mmio_addr;
        struct mii_chip_info *p;

        INIT_LIST_HEAD(&phy->list);
        phy->status = mii_status;
        phy->phy_id = phy_id;

        phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
        phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);

        for (p = mii_chip_table; p->type; p++) {
                if ((p->id[0] == phy->id[0]) &&
                    (p->id[1] == (phy->id[1] & 0xfff0))) {
                        break;
                }
        }

        if (p->id[1]) {
                phy->type = (p->type == MIX) ?
                        ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
                                LAN : HOME) : p->type;
                tp->features |= p->feature;
                if (netif_msg_probe(tp))
                        pr_info("%s: %s transceiver at address %d\n",
                                pci_name(tp->pci_dev), p->name, phy_id);
        } else {
                phy->type = UNKNOWN;
                if (netif_msg_probe(tp))
                        pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
                                pci_name(tp->pci_dev),
                                phy->id[0], (phy->id[1] & 0xfff0), phy_id);
        }
}
static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
{
        if (tp->features & F_PHY_88E1111) {
                void __iomem *ioaddr = tp->mmio_addr;
                int phy_id = tp->mii_if.phy_id;
                u16 reg[2][2] = {
                        { 0x808b, 0x0ce1 },
                        { 0x808f, 0x0c60 }
                }, *p;

                p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];

                mdio_write(ioaddr, phy_id, 0x1b, p[0]);
                udelay(200);
                mdio_write(ioaddr, phy_id, 0x14, p[1]);
                udelay(200);
        }
}
/**
 *      sis190_mii_probe - Probe MII PHY for sis190
 *      @dev: the net device to probe for
 *
 *      Search all 32 possible MII PHY addresses.
 *      Identify and set the current PHY if one is found;
 *      return an error otherwise.
 */
static int __devinit sis190_mii_probe(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        struct mii_if_info *mii_if = &tp->mii_if;
        void __iomem *ioaddr = tp->mmio_addr;
        int phy_id;
        int rc = 0;

        INIT_LIST_HEAD(&tp->first_phy);

        for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
                struct sis190_phy *phy;
                u16 status;

                status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);

                // Try the next MII address if the current one is not accessible.
                if (status == 0xffff || status == 0x0000)
                        continue;

                phy = kmalloc(sizeof(*phy), GFP_KERNEL);
                if (!phy) {
                        sis190_free_phy(&tp->first_phy);
                        rc = -ENOMEM;
                        goto out;
                }

                sis190_init_phy(dev, tp, phy, phy_id, status);

                list_add(&tp->first_phy, &phy->list);
        }

        if (list_empty(&tp->first_phy)) {
                if (netif_msg_probe(tp))
                        pr_info("%s: No MII transceivers found!\n",
                                pci_name(tp->pci_dev));
                rc = -EIO;
                goto out;
        }

        /* Select default PHY for mac */
        sis190_default_phy(dev);

        sis190_mii_probe_88e1111_fixup(tp);

        mii_if->dev = dev;
        mii_if->mdio_read = __mdio_read;
        mii_if->mdio_write = __mdio_write;
        mii_if->phy_id_mask = PHY_ID_ANY;
        mii_if->reg_num_mask = MII_REG_ANY;
out:
        return rc;
}
static void sis190_mii_remove(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);

        sis190_free_phy(&tp->first_phy);
}

static void sis190_release_board(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct sis190_private *tp = netdev_priv(dev);

        iounmap(tp->mmio_addr);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        free_netdev(dev);
}
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
        struct sis190_private *tp;
        struct net_device *dev;
        void __iomem *ioaddr;
        int rc;

        dev = alloc_etherdev(sizeof(*tp));
        if (!dev) {
                if (netif_msg_drv(&debug))
                        pr_err("unable to alloc new ethernet\n");
                rc = -ENOMEM;
                goto err_out_0;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);

        tp = netdev_priv(dev);
        tp->dev = dev;
        tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

        rc = pci_enable_device(pdev);
        if (rc < 0) {
                if (netif_msg_probe(tp))
                        pr_err("%s: enable failure\n", pci_name(pdev));
                goto err_free_dev_1;
        }

        rc = -ENODEV;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                if (netif_msg_probe(tp))
                        pr_err("%s: region #0 is no MMIO resource\n",
                               pci_name(pdev));
                goto err_pci_disable_2;
        }
        if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
                if (netif_msg_probe(tp))
                        pr_err("%s: invalid PCI region size(s)\n",
                               pci_name(pdev));
                goto err_pci_disable_2;
        }

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc < 0) {
                if (netif_msg_probe(tp))
                        pr_err("%s: could not request regions\n",
                               pci_name(pdev));
                goto err_pci_disable_2;
        }

        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc < 0) {
                if (netif_msg_probe(tp))
                        pr_err("%s: DMA configuration failed\n",
                               pci_name(pdev));
                goto err_free_res_3;
        }

        pci_set_master(pdev);

        ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
        if (!ioaddr) {
                if (netif_msg_probe(tp))
                        pr_err("%s: cannot remap MMIO, aborting\n",
                               pci_name(pdev));
                rc = -EIO;
                goto err_free_res_3;
        }

        tp->pci_dev = pdev;
        tp->mmio_addr = ioaddr;
        tp->link_status = LNK_OFF;

        sis190_irq_mask_and_ack(ioaddr);

        sis190_soft_reset(ioaddr);
out:
        return dev;

err_free_res_3:
        pci_release_regions(pdev);
err_pci_disable_2:
        pci_disable_device(pdev);
err_free_dev_1:
        free_netdev(dev);
err_out_0:
        dev = ERR_PTR(rc);
        goto out;
}
static void sis190_tx_timeout(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        u8 tmp8;

        /* Disable Tx, if not already */
        tmp8 = SIS_R8(TxControl);
        if (tmp8 & CmdTxEnb)
                SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

        netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
                   SIS_R32(TxControl), SIS_R32(TxSts));

        /* Disable interrupts by clearing the interrupt mask. */
        SIS_W32(IntrMask, 0x0000);

        /* Stop a shared interrupt from scavenging while we are. */
        spin_lock_irq(&tp->lock);
        sis190_tx_clear(tp);
        spin_unlock_irq(&tp->lock);

        /* ...and finally, reset everything. */
        sis190_hw_start(dev);

        netif_wake_queue(dev);
}
static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
{
        tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
}

static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
                                                     struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        u16 sig;
        int i;

        if (netif_msg_probe(tp))
                pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));

        /* Check to see if there is a sane EEPROM */
        sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

        if ((sig == 0xffff) || (sig == 0x0000)) {
                if (netif_msg_probe(tp))
                        pr_info("%s: Error EEPROM read %x\n",
                                pci_name(pdev), sig);
                return -EIO;
        }

        /* Get MAC address from EEPROM */
        for (i = 0; i < ETH_ALEN / 2; i++) {
                u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

                ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
        }

        sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));

        return 0;
}
/**
 *      sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
 *      @pdev: PCI device
 *      @dev: network device to get address for
 *
 *      SiS96x models store the MAC address in APC CMOS RAM, which is
 *      accessed through the ISA bridge.
 *      The MAC address is read into @dev->dev_addr.
 */
static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
                                                  struct net_device *dev)
{
        static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
        struct sis190_private *tp = netdev_priv(dev);
        struct pci_dev *isa_bridge;
        u8 reg, tmp8;
        unsigned int i;

        if (netif_msg_probe(tp))
                pr_info("%s: Read MAC address from APC\n", pci_name(pdev));

        for (i = 0; i < ARRAY_SIZE(ids); i++) {
                isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
                if (isa_bridge)
                        break;
        }

        if (!isa_bridge) {
                if (netif_msg_probe(tp))
                        pr_info("%s: Can not find ISA bridge\n",
                                pci_name(pdev));
                return -EIO;
        }

        /* Enable port 78h & 79h to access APC Registers. */
        pci_read_config_byte(isa_bridge, 0x48, &tmp8);
        reg = (tmp8 & ~0x02);
        pci_write_config_byte(isa_bridge, 0x48, reg);
        udelay(50);
        pci_read_config_byte(isa_bridge, 0x48, &reg);

        for (i = 0; i < ETH_ALEN; i++) {
                outb(0x9 + i, 0x78);
                dev->dev_addr[i] = inb(0x79);
        }

        outb(0x12, 0x78);
        reg = inb(0x79);

        sis190_set_rgmii(tp, reg);

        /* Restore the value to ISA Bridge */
        pci_write_config_byte(isa_bridge, 0x48, tmp8);
        pci_dev_put(isa_bridge);

        return 0;
}
/**
 *      sis190_init_rxfilter - Initialize the Rx filter
 *      @dev: network device to initialize
 *
 *      Set receive filter address to our MAC address
 *      and enable packet filtering.
 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        u16 ctl;
        int i;

        ctl = SIS_R16(RxMacControl);
        /*
         * Disable packet filtering before setting filter.
         * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
         * only and followed by RxMacAddr (6 bytes). Strange. -- FR
         */
        SIS_W16(RxMacControl, ctl & ~0x0f00);

        for (i = 0; i < ETH_ALEN; i++)
                SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

        SIS_W16(RxMacControl, ctl);
        SIS_PCI_COMMIT();
}
static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
                                         struct net_device *dev)
{
        int rc;

        rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
        if (rc < 0) {
                u8 reg;

                pci_read_config_byte(pdev, 0x73, &reg);

                if (reg & 0x00000001)
                        rc = sis190_get_mac_addr_from_apc(pdev, dev);
        }
        return rc;
}
static void sis190_set_speed_auto(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        int phy_id = tp->mii_if.phy_id;
        int val;

        netif_info(tp, link, dev, "Enabling Auto-negotiation\n");

        val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

        // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
        // unchanged.
        mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
                   ADVERTISE_100FULL | ADVERTISE_10FULL |
                   ADVERTISE_100HALF | ADVERTISE_10HALF);

        // Enable 1000 Full Mode.
        mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

        // Enable auto-negotiation and restart auto-negotiation.
        mdio_write(ioaddr, phy_id, MII_BMCR,
                   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct sis190_private *tp = netdev_priv(dev);

        return mii_ethtool_gset(&tp->mii_if, cmd);
}

static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct sis190_private *tp = netdev_priv(dev);

        return mii_ethtool_sset(&tp->mii_if, cmd);
}

static void sis190_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{
        struct sis190_private *tp = netdev_priv(dev);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(tp->pci_dev),
                sizeof(info->bus_info));
}

static int sis190_get_regs_len(struct net_device *dev)
{
        return SIS190_REGS_SIZE;
}

static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                            void *p)
{
        struct sis190_private *tp = netdev_priv(dev);
        unsigned long flags;

        if (regs->len > SIS190_REGS_SIZE)
                regs->len = SIS190_REGS_SIZE;

        spin_lock_irqsave(&tp->lock, flags);
        memcpy_fromio(p, tp->mmio_addr, regs->len);
        spin_unlock_irqrestore(&tp->lock, flags);
}

static int sis190_nway_reset(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);

        return mii_nway_restart(&tp->mii_if);
}

static u32 sis190_get_msglevel(struct net_device *dev)
{
        struct sis190_private *tp = netdev_priv(dev);

        return tp->msg_enable;
}

static void sis190_set_msglevel(struct net_device *dev, u32 value)
{
        struct sis190_private *tp = netdev_priv(dev);

        tp->msg_enable = value;
}
static const struct ethtool_ops sis190_ethtool_ops = {
        .get_settings   = sis190_get_settings,
        .set_settings   = sis190_set_settings,
        .get_drvinfo    = sis190_get_drvinfo,
        .get_regs_len   = sis190_get_regs_len,
        .get_regs       = sis190_get_regs,
        .get_link       = ethtool_op_get_link,
        .get_msglevel   = sis190_get_msglevel,
        .set_msglevel   = sis190_set_msglevel,
        .nway_reset     = sis190_nway_reset,
};
static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct sis190_private *tp = netdev_priv(dev);

        return !netif_running(dev) ? -EINVAL :
                generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
}

static int sis190_mac_addr(struct net_device *dev, void *p)
{
        int rc;

        rc = eth_mac_addr(dev, p);
        if (!rc)
                sis190_init_rxfilter(dev);
        return rc;
}
static const struct net_device_ops sis190_netdev_ops = {
        .ndo_open               = sis190_open,
        .ndo_stop               = sis190_close,
        .ndo_do_ioctl           = sis190_ioctl,
        .ndo_start_xmit         = sis190_start_xmit,
        .ndo_tx_timeout         = sis190_tx_timeout,
        .ndo_set_rx_mode        = sis190_set_rx_mode,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_set_mac_address    = sis190_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = sis190_netpoll,
#endif
};
static int __devinit sis190_init_one(struct pci_dev *pdev,
                                     const struct pci_device_id *ent)
{
        static int printed_version = 0;
        struct sis190_private *tp;
        struct net_device *dev;
        void __iomem *ioaddr;
        int rc;

        if (!printed_version) {
                if (netif_msg_drv(&debug))
                        pr_info(SIS190_DRIVER_NAME " loaded\n");
                printed_version = 1;
        }

        dev = sis190_init_board(pdev);
        if (IS_ERR(dev)) {
                rc = PTR_ERR(dev);
                goto out;
        }

        pci_set_drvdata(pdev, dev);

        tp = netdev_priv(dev);
        ioaddr = tp->mmio_addr;

        rc = sis190_get_mac_addr(pdev, dev);
        if (rc < 0)
                goto err_release_board;

        sis190_init_rxfilter(dev);

        INIT_WORK(&tp->phy_task, sis190_phy_task);

        dev->netdev_ops = &sis190_netdev_ops;

        SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
        dev->irq = pdev->irq;
        dev->base_addr = (unsigned long) 0xdead;
        dev->watchdog_timeo = SIS190_TX_TIMEOUT;

        spin_lock_init(&tp->lock);

        rc = sis190_mii_probe(dev);
        if (rc < 0)
                goto err_release_board;

        rc = register_netdev(dev);
        if (rc < 0)
                goto err_remove_mii;

        if (netif_msg_probe(tp)) {
                netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
                            pci_name(pdev),
                            sis_chip_info[ent->driver_data].name,
                            ioaddr, dev->irq, dev->dev_addr);
                netdev_info(dev, "%s mode.\n",
                            (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
        }

        netif_carrier_off(dev);

        sis190_set_speed_auto(dev);
out:
        return rc;

err_remove_mii:
        sis190_mii_remove(dev);
err_release_board:
        sis190_release_board(pdev);
        goto out;
}
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct sis190_private *tp = netdev_priv(dev);

        sis190_mii_remove(dev);
        cancel_work_sync(&tp->phy_task);
        unregister_netdev(dev);
        sis190_release_board(pdev);
        pci_set_drvdata(pdev, NULL);
}
static struct pci_driver sis190_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = sis190_pci_tbl,
        .probe          = sis190_init_one,
        .remove         = __devexit_p(sis190_remove_one),
};

static int __init sis190_init_module(void)
{
        return pci_register_driver(&sis190_pci_driver);
}

static void __exit sis190_cleanup_module(void)
{
        pci_unregister_driver(&sis190_pci_driver);
}

module_init(sis190_init_module);
module_exit(sis190_cleanup_module);