1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
28 [link no longer provides useful info -jgarzik]
/* Driver identity strings, used in log messages and ethtool output. */
#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.4.3"
#define DRV_RELDATE	"2007-03-06"
/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;	/* Max events handled per interrupt. */
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   Architectures that cannot handle unaligned accesses default to
   copying every frame; others default to zero-copy (rx_copybreak = 0). */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif
53 /* Work-around for broken BIOSes: they are unable to get the chip back out of
54 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58 * In case you are looking for 'options[]' or 'full_duplex[]', they
59 * are gone. Use ethtool(8) instead.
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
85 #include <linux/module.h>
86 #include <linux/moduleparam.h>
87 #include <linux/kernel.h>
88 #include <linux/string.h>
89 #include <linux/timer.h>
90 #include <linux/errno.h>
91 #include <linux/ioport.h>
92 #include <linux/slab.h>
93 #include <linux/interrupt.h>
94 #include <linux/pci.h>
95 #include <linux/dma-mapping.h>
96 #include <linux/netdevice.h>
97 #include <linux/etherdevice.h>
98 #include <linux/skbuff.h>
99 #include <linux/init.h>
100 #include <linux/delay.h>
101 #include <linux/mii.h>
102 #include <linux/ethtool.h>
103 #include <linux/crc32.h>
104 #include <linux/bitops.h>
105 #include <linux/workqueue.h>
106 #include <asm/processor.h> /* Processor type for cache alignment. */
109 #include <asm/uaccess.h>
110 #include <linux/dmi.h>
112 /* These identify the driver base version and may not be removed. */
113 static const char version
[] __devinitconst
=
114 KERN_INFO DRV_NAME
".c:v1.10-LK" DRV_VERSION
" " DRV_RELDATE
115 " Written by Donald Becker\n";
117 /* This driver was written to use PCI memory space. Some early versions
118 of the Rhine may only work correctly with I/O space accesses. */
119 #ifdef CONFIG_VIA_RHINE_MMIO
124 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
125 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
126 MODULE_LICENSE("GPL");
128 module_param(max_interrupt_work
, int, 0);
129 module_param(debug
, int, 0);
130 module_param(rx_copybreak
, int, 0);
131 module_param(avoid_D3
, bool, 0);
132 MODULE_PARM_DESC(max_interrupt_work
, "VIA Rhine maximum events handled per interrupt");
133 MODULE_PARM_DESC(debug
, "VIA Rhine debug level (0-7)");
134 MODULE_PARM_DESC(rx_copybreak
, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
135 MODULE_PARM_DESC(avoid_D3
, "Avoid power state D3 (work-around for broken BIOSes)");
140 I. Board Compatibility
142 This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
145 II. Board-specific settings
147 Boards with this chip are functional only in a bus-master PCI slot.
149 Many operational settings are loaded from the EEPROM to the Config word at
150 offset 0x78. For most of these settings, this driver assumes that they are
152 If this driver is compiled to use PCI memory space operations the EEPROM
153 must be configured to enable memory ops.
155 III. Driver operation
159 This driver uses two statically allocated fixed-size descriptor lists
160 formed into rings by a branch from the final descriptor to the beginning of
161 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
163 IIIb/c. Transmit/Receive Structure
165 This driver attempts to use a zero-copy receive and transmit scheme.
167 Alas, all data buffers are required to start on a 32 bit boundary, so
168 the driver must often copy transmit packets into bounce buffers.
170 The driver allocates full frame size skbuffs for the Rx ring buffers at
171 open() time and passes the skb->data field to the chip as receive data
172 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
173 a fresh skbuff is allocated and the frame is copied to the new skbuff.
174 When the incoming frame is larger, the skbuff is passed directly up the
175 protocol stack. Buffers consumed this way are replaced by newly allocated
176 skbuffs in the last phase of rhine_rx().
178 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
179 using a full-sized skbuff for small frames vs. the copying costs of larger
180 frames. New boards are typically used in generously configured machines
181 and the underfilled buffers have negligible impact compared to the benefit of
182 a single allocation size, so the default value of zero results in never
183 copying packets. When copying is done, the cost is usually mitigated by using
184 a combined copy/checksum routine. Copying also preloads the cache, which is
185 most useful with small frames.
187 Since the VIA chips are only able to transfer data to buffers on 32 bit
188 boundaries, the IP header at offset 14 in an ethernet frame isn't
189 longword aligned for further processing. Copying these unaligned buffers
190 has the beneficial effect of 16-byte aligning the IP header.
192 IIId. Synchronization
194 The driver runs as two independent, single-threaded flows of control. One
195 is the send-packet routine, which enforces single-threaded use by the
196 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
197 which is single threaded by the hardware and interrupt handling software.
199 The send packet thread has partial control over the Tx ring. It locks the
200 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
201 the ring is not available it stops the transmit queue by
202 calling netif_stop_queue.
204 The interrupt handler has exclusive control over the Rx ring and records stats
205 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
206 empty by incrementing the dirty_tx mark. If at least half of the entries in
207 the Rx ring are available the transmit queue is woken up if it was stopped.
213 Preliminary VT86C100A manual from http://www.via.com.tw/
214 http://www.scyld.com/expert/100mbps.html
215 http://www.scyld.com/expert/NWay.html
216 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
217 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
222 The VT86C100A manual is not reliable information.
223 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
224 in significant performance degradation for bounce buffer copies on transmit
225 and unaligned IP headers on receive.
226 The chip does not pad to minimum transmit length.
231 /* This table drives the PCI probe routines. It's mostly boilerplate in all
232 of the drivers, and will likely be provided by some future kernel.
233 Note the matching code -- the first table entry matches all 56** cards but
234 second only the 1234 card.
/* PCI revision IDs that distinguish the Rhine chip generations.
   NOTE(review): members between VT8237 and VT6105M were elided in this
   listing and restored from the upstream driver — confirm against it. */
enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};
/* Per-revision hardware quirk flags, OR-ed into rhine_private.quirks. */
enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */
/* Beware of PCI posted writes: a dummy read forces the preceding
   write out to the device before we proceed. */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
270 static const struct pci_device_id rhine_pci_tbl
[] = {
271 { 0x1106, 0x3043, PCI_ANY_ID
, PCI_ANY_ID
, }, /* VT86C100A */
272 { 0x1106, 0x3065, PCI_ANY_ID
, PCI_ANY_ID
, }, /* VT6102 */
273 { 0x1106, 0x3106, PCI_ANY_ID
, PCI_ANY_ID
, }, /* 6105{,L,LOM} */
274 { 0x1106, 0x3053, PCI_ANY_ID
, PCI_ANY_ID
, }, /* VT6105M */
275 { } /* terminate list */
277 MODULE_DEVICE_TABLE(pci
, rhine_pci_tbl
);
/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};
297 /* Bits in ConfigD */
299 BackOptional
=0x01, BackModify
=0x02,
300 BackCaptureEffect
=0x04, BackRandom
=0x08
#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same.
   Zero-terminated so probe code can loop until a 0 entry. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable,
	ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif
/* Bits in the interrupt status/mask registers.
   NOTE(review): IntrPCIErr/IntrRxWakeUp values restored from the
   upstream driver; the lines were elided in this listing. */
enum intr_status_bits {
	IntrRxDone	=0x0001, IntrRxErr	=0x0004, IntrRxEmpty	=0x0020,
	IntrTxDone	=0x0002, IntrTxError	=0x0008, IntrTxUnderrun	=0x0210,
	IntrPCIErr	=0x0040,
	IntrStatsMax	=0x0080, IntrRxEarly	=0x0100,
	IntrRxOverflow	=0x0400, IntrRxDropped	=0x0800, IntrRxNoBuf	=0x1000,
	IntrTxAborted	=0x2000, IntrLinkChange	=0x4000,
	IntrRxWakeUp	=0x8000,
	IntrNormalSummary	=0x0003, IntrAbnormalSummary	=0xC260,
	IntrTxDescRace		=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary	=0x082218,
};
325 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
334 /* The Rx and Tx buffer descriptors. */
337 __le32 desc_length
; /* Chain flag, Buffer/frame length */
343 __le32 desc_length
; /* Chain flag, Tx Config, Frame length */
/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000
/* Bits in rx_desc.rx_status as written back by the chip. */
enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};
/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000	/* Descriptor owned by the chip */
};
/* Bits in ChipCmd (0x08) and ChipCmd1 (0x09). */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};
368 struct rhine_private
{
369 /* Descriptor rings */
370 struct rx_desc
*rx_ring
;
371 struct tx_desc
*tx_ring
;
372 dma_addr_t rx_ring_dma
;
373 dma_addr_t tx_ring_dma
;
375 /* The addresses of receive-in-place skbuffs. */
376 struct sk_buff
*rx_skbuff
[RX_RING_SIZE
];
377 dma_addr_t rx_skbuff_dma
[RX_RING_SIZE
];
379 /* The saved address of a sent-in-place packet/buffer, for later free(). */
380 struct sk_buff
*tx_skbuff
[TX_RING_SIZE
];
381 dma_addr_t tx_skbuff_dma
[TX_RING_SIZE
];
383 /* Tx bounce buffers (Rhine-I only) */
384 unsigned char *tx_buf
[TX_RING_SIZE
];
385 unsigned char *tx_bufs
;
386 dma_addr_t tx_bufs_dma
;
388 struct pci_dev
*pdev
;
390 struct net_device
*dev
;
391 struct napi_struct napi
;
393 struct work_struct reset_task
;
395 /* Frequently used values: keep some adjacent for cache effect. */
397 struct rx_desc
*rx_head_desc
;
398 unsigned int cur_rx
, dirty_rx
; /* Producer/consumer ring indices */
399 unsigned int cur_tx
, dirty_tx
;
400 unsigned int rx_buf_sz
; /* Based on MTU+slack. */
403 u8 tx_thresh
, rx_thresh
;
405 struct mii_if_info mii_if
;
409 static int mdio_read(struct net_device
*dev
, int phy_id
, int location
);
410 static void mdio_write(struct net_device
*dev
, int phy_id
, int location
, int value
);
411 static int rhine_open(struct net_device
*dev
);
412 static void rhine_reset_task(struct work_struct
*work
);
413 static void rhine_tx_timeout(struct net_device
*dev
);
414 static netdev_tx_t
rhine_start_tx(struct sk_buff
*skb
,
415 struct net_device
*dev
);
416 static irqreturn_t
rhine_interrupt(int irq
, void *dev_instance
);
417 static void rhine_tx(struct net_device
*dev
);
418 static int rhine_rx(struct net_device
*dev
, int limit
);
419 static void rhine_error(struct net_device
*dev
, int intr_status
);
420 static void rhine_set_rx_mode(struct net_device
*dev
);
421 static struct net_device_stats
*rhine_get_stats(struct net_device
*dev
);
422 static int netdev_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
);
423 static const struct ethtool_ops netdev_ethtool_ops
;
424 static int rhine_close(struct net_device
*dev
);
425 static void rhine_shutdown (struct pci_dev
*pdev
);
/* Busy-wait (up to 1024 iterations) for a register condition to become
   true; complain when it took more than half the budget. */
#define RHINE_WAIT_FOR(condition) do {					\
	int i = 1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
		       DRV_NAME, 1024-i, __func__, __LINE__);		\
} while (0)
436 static inline u32
get_intr_status(struct net_device
*dev
)
438 struct rhine_private
*rp
= netdev_priv(dev
);
439 void __iomem
*ioaddr
= rp
->base
;
442 intr_status
= ioread16(ioaddr
+ IntrStatus
);
443 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
444 if (rp
->quirks
& rqStatusWBRace
)
445 intr_status
|= ioread8(ioaddr
+ IntrStatus2
) << 16;
450 * Get power related registers into sane state.
451 * Notify user about past WOL event.
453 static void rhine_power_init(struct net_device
*dev
)
455 struct rhine_private
*rp
= netdev_priv(dev
);
456 void __iomem
*ioaddr
= rp
->base
;
459 if (rp
->quirks
& rqWOL
) {
460 /* Make sure chip is in power state D0 */
461 iowrite8(ioread8(ioaddr
+ StickyHW
) & 0xFC, ioaddr
+ StickyHW
);
463 /* Disable "force PME-enable" */
464 iowrite8(0x80, ioaddr
+ WOLcgClr
);
466 /* Clear power-event config bits (WOL) */
467 iowrite8(0xFF, ioaddr
+ WOLcrClr
);
468 /* More recent cards can manage two additional patterns */
469 if (rp
->quirks
& rq6patterns
)
470 iowrite8(0x03, ioaddr
+ WOLcrClr1
);
472 /* Save power-event status bits */
473 wolstat
= ioread8(ioaddr
+ PwrcsrSet
);
474 if (rp
->quirks
& rq6patterns
)
475 wolstat
|= (ioread8(ioaddr
+ PwrcsrSet1
) & 0x03) << 8;
477 /* Clear power-event status bits */
478 iowrite8(0xFF, ioaddr
+ PwrcsrClr
);
479 if (rp
->quirks
& rq6patterns
)
480 iowrite8(0x03, ioaddr
+ PwrcsrClr1
);
486 reason
= "Magic packet";
489 reason
= "Link went up";
492 reason
= "Link went down";
495 reason
= "Unicast packet";
498 reason
= "Multicast/broadcast packet";
503 printk(KERN_INFO
"%s: Woke system up. Reason: %s.\n",
509 static void rhine_chip_reset(struct net_device
*dev
)
511 struct rhine_private
*rp
= netdev_priv(dev
);
512 void __iomem
*ioaddr
= rp
->base
;
514 iowrite8(Cmd1Reset
, ioaddr
+ ChipCmd1
);
517 if (ioread8(ioaddr
+ ChipCmd1
) & Cmd1Reset
) {
518 printk(KERN_INFO
"%s: Reset not complete yet. "
519 "Trying harder.\n", DRV_NAME
);
522 if (rp
->quirks
& rqForceReset
)
523 iowrite8(0x40, ioaddr
+ MiscCmd
);
525 /* Reset can take somewhat longer (rare) */
526 RHINE_WAIT_FOR(!(ioread8(ioaddr
+ ChipCmd1
) & Cmd1Reset
));
530 printk(KERN_INFO
"%s: Reset %s.\n", dev
->name
,
531 (ioread8(ioaddr
+ ChipCmd1
) & Cmd1Reset
) ?
532 "failed" : "succeeded");
536 static void enable_mmio(long pioaddr
, u32 quirks
)
539 if (quirks
& rqRhineI
) {
540 /* More recent docs say that this bit is reserved ... */
541 n
= inb(pioaddr
+ ConfigA
) | 0x20;
542 outb(n
, pioaddr
+ ConfigA
);
544 n
= inb(pioaddr
+ ConfigD
) | 0x80;
545 outb(n
, pioaddr
+ ConfigD
);
551 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
552 * (plus 0x6C for Rhine-I/II)
554 static void __devinit
rhine_reload_eeprom(long pioaddr
, struct net_device
*dev
)
556 struct rhine_private
*rp
= netdev_priv(dev
);
557 void __iomem
*ioaddr
= rp
->base
;
559 outb(0x20, pioaddr
+ MACRegEEcsr
);
560 RHINE_WAIT_FOR(!(inb(pioaddr
+ MACRegEEcsr
) & 0x20));
564 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
565 * MMIO. If reloading EEPROM was done first this could be avoided, but
566 * it is not known if that still works with the "win98-reboot" problem.
568 enable_mmio(pioaddr
, rp
->quirks
);
571 /* Turn off EEPROM-controlled wake-up (magic packet) */
572 if (rp
->quirks
& rqWOL
)
573 iowrite8(ioread8(ioaddr
+ ConfigA
) & 0xFC, ioaddr
+ ConfigA
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netconsole/netpoll hook: run the interrupt handler with IRQs masked. */
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
#endif
586 static int rhine_napipoll(struct napi_struct
*napi
, int budget
)
588 struct rhine_private
*rp
= container_of(napi
, struct rhine_private
, napi
);
589 struct net_device
*dev
= rp
->dev
;
590 void __iomem
*ioaddr
= rp
->base
;
593 work_done
= rhine_rx(dev
, budget
);
595 if (work_done
< budget
) {
598 iowrite16(IntrRxDone
| IntrRxErr
| IntrRxEmpty
| IntrRxOverflow
|
599 IntrRxDropped
| IntrRxNoBuf
| IntrTxAborted
|
600 IntrTxDone
| IntrTxError
| IntrTxUnderrun
|
601 IntrPCIErr
| IntrStatsMax
| IntrLinkChange
,
602 ioaddr
+ IntrEnable
);
607 static void __devinit
rhine_hw_init(struct net_device
*dev
, long pioaddr
)
609 struct rhine_private
*rp
= netdev_priv(dev
);
611 /* Reset the chip to erase previous misconfiguration. */
612 rhine_chip_reset(dev
);
614 /* Rhine-I needs extra time to recuperate before EEPROM reload */
615 if (rp
->quirks
& rqRhineI
)
618 /* Reload EEPROM controlled bytes cleared by soft reset */
619 rhine_reload_eeprom(pioaddr
, dev
);
622 static const struct net_device_ops rhine_netdev_ops
= {
623 .ndo_open
= rhine_open
,
624 .ndo_stop
= rhine_close
,
625 .ndo_start_xmit
= rhine_start_tx
,
626 .ndo_get_stats
= rhine_get_stats
,
627 .ndo_set_multicast_list
= rhine_set_rx_mode
,
628 .ndo_change_mtu
= eth_change_mtu
,
629 .ndo_validate_addr
= eth_validate_addr
,
630 .ndo_set_mac_address
= eth_mac_addr
,
631 .ndo_do_ioctl
= netdev_ioctl
,
632 .ndo_tx_timeout
= rhine_tx_timeout
,
633 #ifdef CONFIG_NET_POLL_CONTROLLER
634 .ndo_poll_controller
= rhine_poll
,
638 static int __devinit
rhine_init_one(struct pci_dev
*pdev
,
639 const struct pci_device_id
*ent
)
641 struct net_device
*dev
;
642 struct rhine_private
*rp
;
647 void __iomem
*ioaddr
;
656 /* when built into the kernel, we only print version if device is found */
658 static int printed_version
;
659 if (!printed_version
++)
667 if (pdev
->revision
< VTunknown0
) {
671 else if (pdev
->revision
>= VT6102
) {
672 quirks
= rqWOL
| rqForceReset
;
673 if (pdev
->revision
< VT6105
) {
675 quirks
|= rqStatusWBRace
; /* Rhine-II exclusive */
678 phy_id
= 1; /* Integrated PHY, phy_id fixed to 1 */
679 if (pdev
->revision
>= VT6105_B0
)
680 quirks
|= rq6patterns
;
681 if (pdev
->revision
< VT6105M
)
684 name
= "Rhine III (Management Adapter)";
688 rc
= pci_enable_device(pdev
);
692 /* this should always be supported */
693 rc
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
695 printk(KERN_ERR
"32-bit PCI DMA addresses not supported by "
701 if ((pci_resource_len(pdev
, 0) < io_size
) ||
702 (pci_resource_len(pdev
, 1) < io_size
)) {
704 printk(KERN_ERR
"Insufficient PCI resources, aborting\n");
708 pioaddr
= pci_resource_start(pdev
, 0);
709 memaddr
= pci_resource_start(pdev
, 1);
711 pci_set_master(pdev
);
713 dev
= alloc_etherdev(sizeof(struct rhine_private
));
716 printk(KERN_ERR
"alloc_etherdev failed\n");
719 SET_NETDEV_DEV(dev
, &pdev
->dev
);
721 rp
= netdev_priv(dev
);
724 rp
->pioaddr
= pioaddr
;
727 rc
= pci_request_regions(pdev
, DRV_NAME
);
729 goto err_out_free_netdev
;
731 ioaddr
= pci_iomap(pdev
, bar
, io_size
);
734 printk(KERN_ERR
"ioremap failed for device %s, region 0x%X "
735 "@ 0x%lX\n", pci_name(pdev
), io_size
, memaddr
);
736 goto err_out_free_res
;
740 enable_mmio(pioaddr
, quirks
);
742 /* Check that selected MMIO registers match the PIO ones */
744 while (mmio_verify_registers
[i
]) {
745 int reg
= mmio_verify_registers
[i
++];
746 unsigned char a
= inb(pioaddr
+reg
);
747 unsigned char b
= readb(ioaddr
+reg
);
750 printk(KERN_ERR
"MMIO do not match PIO [%02x] "
751 "(%02x != %02x)\n", reg
, a
, b
);
755 #endif /* USE_MMIO */
757 dev
->base_addr
= (unsigned long)ioaddr
;
760 /* Get chip registers into a sane state */
761 rhine_power_init(dev
);
762 rhine_hw_init(dev
, pioaddr
);
764 for (i
= 0; i
< 6; i
++)
765 dev
->dev_addr
[i
] = ioread8(ioaddr
+ StationAddr
+ i
);
766 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
768 if (!is_valid_ether_addr(dev
->perm_addr
)) {
770 printk(KERN_ERR
"Invalid MAC address\n");
774 /* For Rhine-I/II, phy_id is loaded from EEPROM */
776 phy_id
= ioread8(ioaddr
+ 0x6C);
778 dev
->irq
= pdev
->irq
;
780 spin_lock_init(&rp
->lock
);
781 INIT_WORK(&rp
->reset_task
, rhine_reset_task
);
783 rp
->mii_if
.dev
= dev
;
784 rp
->mii_if
.mdio_read
= mdio_read
;
785 rp
->mii_if
.mdio_write
= mdio_write
;
786 rp
->mii_if
.phy_id_mask
= 0x1f;
787 rp
->mii_if
.reg_num_mask
= 0x1f;
789 /* The chip-specific entries in the device structure. */
790 dev
->netdev_ops
= &rhine_netdev_ops
;
791 dev
->ethtool_ops
= &netdev_ethtool_ops
,
792 dev
->watchdog_timeo
= TX_TIMEOUT
;
794 netif_napi_add(dev
, &rp
->napi
, rhine_napipoll
, 64);
796 if (rp
->quirks
& rqRhineI
)
797 dev
->features
|= NETIF_F_SG
|NETIF_F_HW_CSUM
;
799 /* dev->name not defined before register_netdev()! */
800 rc
= register_netdev(dev
);
804 printk(KERN_INFO
"%s: VIA %s at 0x%lx, %pM, IRQ %d.\n",
811 dev
->dev_addr
, pdev
->irq
);
813 pci_set_drvdata(pdev
, dev
);
817 int mii_status
= mdio_read(dev
, phy_id
, 1);
818 mii_cmd
= mdio_read(dev
, phy_id
, MII_BMCR
) & ~BMCR_ISOLATE
;
819 mdio_write(dev
, phy_id
, MII_BMCR
, mii_cmd
);
820 if (mii_status
!= 0xffff && mii_status
!= 0x0000) {
821 rp
->mii_if
.advertising
= mdio_read(dev
, phy_id
, 4);
822 printk(KERN_INFO
"%s: MII PHY found at address "
823 "%d, status 0x%4.4x advertising %4.4x "
824 "Link %4.4x.\n", dev
->name
, phy_id
,
825 mii_status
, rp
->mii_if
.advertising
,
826 mdio_read(dev
, phy_id
, 5));
828 /* set IFF_RUNNING */
829 if (mii_status
& BMSR_LSTATUS
)
830 netif_carrier_on(dev
);
832 netif_carrier_off(dev
);
836 rp
->mii_if
.phy_id
= phy_id
;
837 if (debug
> 1 && avoid_D3
)
838 printk(KERN_INFO
"%s: No D3 power state at shutdown.\n",
844 pci_iounmap(pdev
, ioaddr
);
846 pci_release_regions(pdev
);
853 static int alloc_ring(struct net_device
* dev
)
855 struct rhine_private
*rp
= netdev_priv(dev
);
859 ring
= pci_alloc_consistent(rp
->pdev
,
860 RX_RING_SIZE
* sizeof(struct rx_desc
) +
861 TX_RING_SIZE
* sizeof(struct tx_desc
),
864 printk(KERN_ERR
"Could not allocate DMA memory.\n");
867 if (rp
->quirks
& rqRhineI
) {
868 rp
->tx_bufs
= pci_alloc_consistent(rp
->pdev
,
869 PKT_BUF_SZ
* TX_RING_SIZE
,
871 if (rp
->tx_bufs
== NULL
) {
872 pci_free_consistent(rp
->pdev
,
873 RX_RING_SIZE
* sizeof(struct rx_desc
) +
874 TX_RING_SIZE
* sizeof(struct tx_desc
),
881 rp
->tx_ring
= ring
+ RX_RING_SIZE
* sizeof(struct rx_desc
);
882 rp
->rx_ring_dma
= ring_dma
;
883 rp
->tx_ring_dma
= ring_dma
+ RX_RING_SIZE
* sizeof(struct rx_desc
);
888 static void free_ring(struct net_device
* dev
)
890 struct rhine_private
*rp
= netdev_priv(dev
);
892 pci_free_consistent(rp
->pdev
,
893 RX_RING_SIZE
* sizeof(struct rx_desc
) +
894 TX_RING_SIZE
* sizeof(struct tx_desc
),
895 rp
->rx_ring
, rp
->rx_ring_dma
);
899 pci_free_consistent(rp
->pdev
, PKT_BUF_SZ
* TX_RING_SIZE
,
900 rp
->tx_bufs
, rp
->tx_bufs_dma
);
906 static void alloc_rbufs(struct net_device
*dev
)
908 struct rhine_private
*rp
= netdev_priv(dev
);
912 rp
->dirty_rx
= rp
->cur_rx
= 0;
914 rp
->rx_buf_sz
= (dev
->mtu
<= 1500 ? PKT_BUF_SZ
: dev
->mtu
+ 32);
915 rp
->rx_head_desc
= &rp
->rx_ring
[0];
916 next
= rp
->rx_ring_dma
;
918 /* Init the ring entries */
919 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
920 rp
->rx_ring
[i
].rx_status
= 0;
921 rp
->rx_ring
[i
].desc_length
= cpu_to_le32(rp
->rx_buf_sz
);
922 next
+= sizeof(struct rx_desc
);
923 rp
->rx_ring
[i
].next_desc
= cpu_to_le32(next
);
924 rp
->rx_skbuff
[i
] = NULL
;
926 /* Mark the last entry as wrapping the ring. */
927 rp
->rx_ring
[i
-1].next_desc
= cpu_to_le32(rp
->rx_ring_dma
);
929 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
930 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
931 struct sk_buff
*skb
= netdev_alloc_skb(dev
, rp
->rx_buf_sz
);
932 rp
->rx_skbuff
[i
] = skb
;
935 skb
->dev
= dev
; /* Mark as being used by this device. */
937 rp
->rx_skbuff_dma
[i
] =
938 pci_map_single(rp
->pdev
, skb
->data
, rp
->rx_buf_sz
,
941 rp
->rx_ring
[i
].addr
= cpu_to_le32(rp
->rx_skbuff_dma
[i
]);
942 rp
->rx_ring
[i
].rx_status
= cpu_to_le32(DescOwn
);
944 rp
->dirty_rx
= (unsigned int)(i
- RX_RING_SIZE
);
947 static void free_rbufs(struct net_device
* dev
)
949 struct rhine_private
*rp
= netdev_priv(dev
);
952 /* Free all the skbuffs in the Rx queue. */
953 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
954 rp
->rx_ring
[i
].rx_status
= 0;
955 rp
->rx_ring
[i
].addr
= cpu_to_le32(0xBADF00D0); /* An invalid address. */
956 if (rp
->rx_skbuff
[i
]) {
957 pci_unmap_single(rp
->pdev
,
958 rp
->rx_skbuff_dma
[i
],
959 rp
->rx_buf_sz
, PCI_DMA_FROMDEVICE
);
960 dev_kfree_skb(rp
->rx_skbuff
[i
]);
962 rp
->rx_skbuff
[i
] = NULL
;
966 static void alloc_tbufs(struct net_device
* dev
)
968 struct rhine_private
*rp
= netdev_priv(dev
);
972 rp
->dirty_tx
= rp
->cur_tx
= 0;
973 next
= rp
->tx_ring_dma
;
974 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
975 rp
->tx_skbuff
[i
] = NULL
;
976 rp
->tx_ring
[i
].tx_status
= 0;
977 rp
->tx_ring
[i
].desc_length
= cpu_to_le32(TXDESC
);
978 next
+= sizeof(struct tx_desc
);
979 rp
->tx_ring
[i
].next_desc
= cpu_to_le32(next
);
980 if (rp
->quirks
& rqRhineI
)
981 rp
->tx_buf
[i
] = &rp
->tx_bufs
[i
* PKT_BUF_SZ
];
983 rp
->tx_ring
[i
-1].next_desc
= cpu_to_le32(rp
->tx_ring_dma
);
987 static void free_tbufs(struct net_device
* dev
)
989 struct rhine_private
*rp
= netdev_priv(dev
);
992 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
993 rp
->tx_ring
[i
].tx_status
= 0;
994 rp
->tx_ring
[i
].desc_length
= cpu_to_le32(TXDESC
);
995 rp
->tx_ring
[i
].addr
= cpu_to_le32(0xBADF00D0); /* An invalid address. */
996 if (rp
->tx_skbuff
[i
]) {
997 if (rp
->tx_skbuff_dma
[i
]) {
998 pci_unmap_single(rp
->pdev
,
999 rp
->tx_skbuff_dma
[i
],
1000 rp
->tx_skbuff
[i
]->len
,
1003 dev_kfree_skb(rp
->tx_skbuff
[i
]);
1005 rp
->tx_skbuff
[i
] = NULL
;
1006 rp
->tx_buf
[i
] = NULL
;
1010 static void rhine_check_media(struct net_device
*dev
, unsigned int init_media
)
1012 struct rhine_private
*rp
= netdev_priv(dev
);
1013 void __iomem
*ioaddr
= rp
->base
;
1015 mii_check_media(&rp
->mii_if
, debug
, init_media
);
1017 if (rp
->mii_if
.full_duplex
)
1018 iowrite8(ioread8(ioaddr
+ ChipCmd1
) | Cmd1FDuplex
,
1021 iowrite8(ioread8(ioaddr
+ ChipCmd1
) & ~Cmd1FDuplex
,
1024 printk(KERN_INFO
"%s: force_media %d, carrier %d\n", dev
->name
,
1025 rp
->mii_if
.force_media
, netif_carrier_ok(dev
));
1028 /* Called after status of force_media possibly changed */
1029 static void rhine_set_carrier(struct mii_if_info
*mii
)
1031 if (mii
->force_media
) {
1032 /* autoneg is off: Link is always assumed to be up */
1033 if (!netif_carrier_ok(mii
->dev
))
1034 netif_carrier_on(mii
->dev
);
1036 else /* Let MMI library update carrier status */
1037 rhine_check_media(mii
->dev
, 0);
1039 printk(KERN_INFO
"%s: force_media %d, carrier %d\n",
1040 mii
->dev
->name
, mii
->force_media
,
1041 netif_carrier_ok(mii
->dev
));
1044 static void init_registers(struct net_device
*dev
)
1046 struct rhine_private
*rp
= netdev_priv(dev
);
1047 void __iomem
*ioaddr
= rp
->base
;
1050 for (i
= 0; i
< 6; i
++)
1051 iowrite8(dev
->dev_addr
[i
], ioaddr
+ StationAddr
+ i
);
1053 /* Initialize other registers. */
1054 iowrite16(0x0006, ioaddr
+ PCIBusConfig
); /* Tune configuration??? */
1055 /* Configure initial FIFO thresholds. */
1056 iowrite8(0x20, ioaddr
+ TxConfig
);
1057 rp
->tx_thresh
= 0x20;
1058 rp
->rx_thresh
= 0x60; /* Written in rhine_set_rx_mode(). */
1060 iowrite32(rp
->rx_ring_dma
, ioaddr
+ RxRingPtr
);
1061 iowrite32(rp
->tx_ring_dma
, ioaddr
+ TxRingPtr
);
1063 rhine_set_rx_mode(dev
);
1065 napi_enable(&rp
->napi
);
1067 /* Enable interrupts by setting the interrupt mask. */
1068 iowrite16(IntrRxDone
| IntrRxErr
| IntrRxEmpty
| IntrRxOverflow
|
1069 IntrRxDropped
| IntrRxNoBuf
| IntrTxAborted
|
1070 IntrTxDone
| IntrTxError
| IntrTxUnderrun
|
1071 IntrPCIErr
| IntrStatsMax
| IntrLinkChange
,
1072 ioaddr
+ IntrEnable
);
1074 iowrite16(CmdStart
| CmdTxOn
| CmdRxOn
| (Cmd1NoTxPoll
<< 8),
1076 rhine_check_media(dev
, 1);
1079 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1080 static void rhine_enable_linkmon(void __iomem
*ioaddr
)
1082 iowrite8(0, ioaddr
+ MIICmd
);
1083 iowrite8(MII_BMSR
, ioaddr
+ MIIRegAddr
);
1084 iowrite8(0x80, ioaddr
+ MIICmd
);
1086 RHINE_WAIT_FOR((ioread8(ioaddr
+ MIIRegAddr
) & 0x20));
1088 iowrite8(MII_BMSR
| 0x40, ioaddr
+ MIIRegAddr
);
1091 /* Disable MII link status auto-polling (required for MDIO access) */
1092 static void rhine_disable_linkmon(void __iomem
*ioaddr
, u32 quirks
)
1094 iowrite8(0, ioaddr
+ MIICmd
);
1096 if (quirks
& rqRhineI
) {
1097 iowrite8(0x01, ioaddr
+ MIIRegAddr
); // MII_BMSR
1099 /* Can be called from ISR. Evil. */
1102 /* 0x80 must be set immediately before turning it off */
1103 iowrite8(0x80, ioaddr
+ MIICmd
);
1105 RHINE_WAIT_FOR(ioread8(ioaddr
+ MIIRegAddr
) & 0x20);
1107 /* Heh. Now clear 0x80 again. */
1108 iowrite8(0, ioaddr
+ MIICmd
);
1111 RHINE_WAIT_FOR(ioread8(ioaddr
+ MIIRegAddr
) & 0x80);
1114 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1116 static int mdio_read(struct net_device
*dev
, int phy_id
, int regnum
)
1118 struct rhine_private
*rp
= netdev_priv(dev
);
1119 void __iomem
*ioaddr
= rp
->base
;
1122 rhine_disable_linkmon(ioaddr
, rp
->quirks
);
1124 /* rhine_disable_linkmon already cleared MIICmd */
1125 iowrite8(phy_id
, ioaddr
+ MIIPhyAddr
);
1126 iowrite8(regnum
, ioaddr
+ MIIRegAddr
);
1127 iowrite8(0x40, ioaddr
+ MIICmd
); /* Trigger read */
1128 RHINE_WAIT_FOR(!(ioread8(ioaddr
+ MIICmd
) & 0x40));
1129 result
= ioread16(ioaddr
+ MIIData
);
1131 rhine_enable_linkmon(ioaddr
);
1135 static void mdio_write(struct net_device
*dev
, int phy_id
, int regnum
, int value
)
1137 struct rhine_private
*rp
= netdev_priv(dev
);
1138 void __iomem
*ioaddr
= rp
->base
;
1140 rhine_disable_linkmon(ioaddr
, rp
->quirks
);
1142 /* rhine_disable_linkmon already cleared MIICmd */
1143 iowrite8(phy_id
, ioaddr
+ MIIPhyAddr
);
1144 iowrite8(regnum
, ioaddr
+ MIIRegAddr
);
1145 iowrite16(value
, ioaddr
+ MIIData
);
1146 iowrite8(0x20, ioaddr
+ MIICmd
); /* Trigger write */
1147 RHINE_WAIT_FOR(!(ioread8(ioaddr
+ MIICmd
) & 0x20));
1149 rhine_enable_linkmon(ioaddr
);
1152 static int rhine_open(struct net_device
*dev
)
1154 struct rhine_private
*rp
= netdev_priv(dev
);
1155 void __iomem
*ioaddr
= rp
->base
;
1158 rc
= request_irq(rp
->pdev
->irq
, rhine_interrupt
, IRQF_SHARED
, dev
->name
,
1164 printk(KERN_DEBUG
"%s: rhine_open() irq %d.\n",
1165 dev
->name
, rp
->pdev
->irq
);
1167 rc
= alloc_ring(dev
);
1169 free_irq(rp
->pdev
->irq
, dev
);
1174 rhine_chip_reset(dev
);
1175 init_registers(dev
);
1177 printk(KERN_DEBUG
"%s: Done rhine_open(), status %4.4x "
1178 "MII status: %4.4x.\n",
1179 dev
->name
, ioread16(ioaddr
+ ChipCmd
),
1180 mdio_read(dev
, rp
->mii_if
.phy_id
, MII_BMSR
));
1182 netif_start_queue(dev
);
1187 static void rhine_reset_task(struct work_struct
*work
)
1189 struct rhine_private
*rp
= container_of(work
, struct rhine_private
,
1191 struct net_device
*dev
= rp
->dev
;
1193 /* protect against concurrent rx interrupts */
1194 disable_irq(rp
->pdev
->irq
);
1196 napi_disable(&rp
->napi
);
1198 spin_lock_bh(&rp
->lock
);
1200 /* clear all descriptors */
1206 /* Reinitialize the hardware. */
1207 rhine_chip_reset(dev
);
1208 init_registers(dev
);
1210 spin_unlock_bh(&rp
->lock
);
1211 enable_irq(rp
->pdev
->irq
);
1213 dev
->trans_start
= jiffies
;
1214 dev
->stats
.tx_errors
++;
1215 netif_wake_queue(dev
);
1218 static void rhine_tx_timeout(struct net_device
*dev
)
1220 struct rhine_private
*rp
= netdev_priv(dev
);
1221 void __iomem
*ioaddr
= rp
->base
;
1223 printk(KERN_WARNING
"%s: Transmit timed out, status %4.4x, PHY status "
1224 "%4.4x, resetting...\n",
1225 dev
->name
, ioread16(ioaddr
+ IntrStatus
),
1226 mdio_read(dev
, rp
->mii_if
.phy_id
, MII_BMSR
));
1228 schedule_work(&rp
->reset_task
);
1231 static netdev_tx_t
rhine_start_tx(struct sk_buff
*skb
,
1232 struct net_device
*dev
)
1234 struct rhine_private
*rp
= netdev_priv(dev
);
1235 void __iomem
*ioaddr
= rp
->base
;
1237 unsigned long flags
;
1239 /* Caution: the write order is important here, set the field
1240 with the "ownership" bits last. */
1242 /* Calculate the next Tx descriptor entry. */
1243 entry
= rp
->cur_tx
% TX_RING_SIZE
;
1245 if (skb_padto(skb
, ETH_ZLEN
))
1246 return NETDEV_TX_OK
;
1248 rp
->tx_skbuff
[entry
] = skb
;
1250 if ((rp
->quirks
& rqRhineI
) &&
1251 (((unsigned long)skb
->data
& 3) || skb_shinfo(skb
)->nr_frags
!= 0 || skb
->ip_summed
== CHECKSUM_PARTIAL
)) {
1252 /* Must use alignment buffer. */
1253 if (skb
->len
> PKT_BUF_SZ
) {
1254 /* packet too long, drop it */
1256 rp
->tx_skbuff
[entry
] = NULL
;
1257 dev
->stats
.tx_dropped
++;
1258 return NETDEV_TX_OK
;
1261 /* Padding is not copied and so must be redone. */
1262 skb_copy_and_csum_dev(skb
, rp
->tx_buf
[entry
]);
1263 if (skb
->len
< ETH_ZLEN
)
1264 memset(rp
->tx_buf
[entry
] + skb
->len
, 0,
1265 ETH_ZLEN
- skb
->len
);
1266 rp
->tx_skbuff_dma
[entry
] = 0;
1267 rp
->tx_ring
[entry
].addr
= cpu_to_le32(rp
->tx_bufs_dma
+
1268 (rp
->tx_buf
[entry
] -
1271 rp
->tx_skbuff_dma
[entry
] =
1272 pci_map_single(rp
->pdev
, skb
->data
, skb
->len
,
1274 rp
->tx_ring
[entry
].addr
= cpu_to_le32(rp
->tx_skbuff_dma
[entry
]);
1277 rp
->tx_ring
[entry
].desc_length
=
1278 cpu_to_le32(TXDESC
| (skb
->len
>= ETH_ZLEN
? skb
->len
: ETH_ZLEN
));
1281 spin_lock_irqsave(&rp
->lock
, flags
);
1283 rp
->tx_ring
[entry
].tx_status
= cpu_to_le32(DescOwn
);
1288 /* Non-x86 Todo: explicitly flush cache lines here. */
1290 /* Wake the potentially-idle transmit channel */
1291 iowrite8(ioread8(ioaddr
+ ChipCmd1
) | Cmd1TxDemand
,
1295 if (rp
->cur_tx
== rp
->dirty_tx
+ TX_QUEUE_LEN
)
1296 netif_stop_queue(dev
);
1298 dev
->trans_start
= jiffies
;
1300 spin_unlock_irqrestore(&rp
->lock
, flags
);
1303 printk(KERN_DEBUG
"%s: Transmit frame #%d queued in slot %d.\n",
1304 dev
->name
, rp
->cur_tx
-1, entry
);
1306 return NETDEV_TX_OK
;
1309 /* The interrupt handler does all of the Rx thread work and cleans up
1310 after the Tx thread. */
1311 static irqreturn_t
rhine_interrupt(int irq
, void *dev_instance
)
1313 struct net_device
*dev
= dev_instance
;
1314 struct rhine_private
*rp
= netdev_priv(dev
);
1315 void __iomem
*ioaddr
= rp
->base
;
1317 int boguscnt
= max_interrupt_work
;
1320 while ((intr_status
= get_intr_status(dev
))) {
1323 /* Acknowledge all of the current interrupt sources ASAP. */
1324 if (intr_status
& IntrTxDescRace
)
1325 iowrite8(0x08, ioaddr
+ IntrStatus2
);
1326 iowrite16(intr_status
& 0xffff, ioaddr
+ IntrStatus
);
1330 printk(KERN_DEBUG
"%s: Interrupt, status %8.8x.\n",
1331 dev
->name
, intr_status
);
1333 if (intr_status
& (IntrRxDone
| IntrRxErr
| IntrRxDropped
|
1334 IntrRxWakeUp
| IntrRxEmpty
| IntrRxNoBuf
)) {
1335 iowrite16(IntrTxAborted
|
1336 IntrTxDone
| IntrTxError
| IntrTxUnderrun
|
1337 IntrPCIErr
| IntrStatsMax
| IntrLinkChange
,
1338 ioaddr
+ IntrEnable
);
1340 napi_schedule(&rp
->napi
);
1343 if (intr_status
& (IntrTxErrSummary
| IntrTxDone
)) {
1344 if (intr_status
& IntrTxErrSummary
) {
1345 /* Avoid scavenging before Tx engine turned off */
1346 RHINE_WAIT_FOR(!(ioread8(ioaddr
+ChipCmd
) & CmdTxOn
));
1348 ioread8(ioaddr
+ChipCmd
) & CmdTxOn
)
1349 printk(KERN_WARNING
"%s: "
1350 "rhine_interrupt() Tx engine "
1351 "still on.\n", dev
->name
);
1356 /* Abnormal error summary/uncommon events handlers. */
1357 if (intr_status
& (IntrPCIErr
| IntrLinkChange
|
1358 IntrStatsMax
| IntrTxError
| IntrTxAborted
|
1359 IntrTxUnderrun
| IntrTxDescRace
))
1360 rhine_error(dev
, intr_status
);
1362 if (--boguscnt
< 0) {
1363 printk(KERN_WARNING
"%s: Too much work at interrupt, "
1365 dev
->name
, intr_status
);
1371 printk(KERN_DEBUG
"%s: exiting interrupt, status=%8.8x.\n",
1372 dev
->name
, ioread16(ioaddr
+ IntrStatus
));
1373 return IRQ_RETVAL(handled
);
1376 /* This routine is logically part of the interrupt handler, but isolated
1378 static void rhine_tx(struct net_device
*dev
)
1380 struct rhine_private
*rp
= netdev_priv(dev
);
1381 int txstatus
= 0, entry
= rp
->dirty_tx
% TX_RING_SIZE
;
1383 spin_lock(&rp
->lock
);
1385 /* find and cleanup dirty tx descriptors */
1386 while (rp
->dirty_tx
!= rp
->cur_tx
) {
1387 txstatus
= le32_to_cpu(rp
->tx_ring
[entry
].tx_status
);
1389 printk(KERN_DEBUG
"Tx scavenge %d status %8.8x.\n",
1391 if (txstatus
& DescOwn
)
1393 if (txstatus
& 0x8000) {
1395 printk(KERN_DEBUG
"%s: Transmit error, "
1396 "Tx status %8.8x.\n",
1397 dev
->name
, txstatus
);
1398 dev
->stats
.tx_errors
++;
1399 if (txstatus
& 0x0400)
1400 dev
->stats
.tx_carrier_errors
++;
1401 if (txstatus
& 0x0200)
1402 dev
->stats
.tx_window_errors
++;
1403 if (txstatus
& 0x0100)
1404 dev
->stats
.tx_aborted_errors
++;
1405 if (txstatus
& 0x0080)
1406 dev
->stats
.tx_heartbeat_errors
++;
1407 if (((rp
->quirks
& rqRhineI
) && txstatus
& 0x0002) ||
1408 (txstatus
& 0x0800) || (txstatus
& 0x1000)) {
1409 dev
->stats
.tx_fifo_errors
++;
1410 rp
->tx_ring
[entry
].tx_status
= cpu_to_le32(DescOwn
);
1411 break; /* Keep the skb - we try again */
1413 /* Transmitter restarted in 'abnormal' handler. */
1415 if (rp
->quirks
& rqRhineI
)
1416 dev
->stats
.collisions
+= (txstatus
>> 3) & 0x0F;
1418 dev
->stats
.collisions
+= txstatus
& 0x0F;
1420 printk(KERN_DEBUG
"collisions: %1.1x:%1.1x\n",
1421 (txstatus
>> 3) & 0xF,
1423 dev
->stats
.tx_bytes
+= rp
->tx_skbuff
[entry
]->len
;
1424 dev
->stats
.tx_packets
++;
1426 /* Free the original skb. */
1427 if (rp
->tx_skbuff_dma
[entry
]) {
1428 pci_unmap_single(rp
->pdev
,
1429 rp
->tx_skbuff_dma
[entry
],
1430 rp
->tx_skbuff
[entry
]->len
,
1433 dev_kfree_skb_irq(rp
->tx_skbuff
[entry
]);
1434 rp
->tx_skbuff
[entry
] = NULL
;
1435 entry
= (++rp
->dirty_tx
) % TX_RING_SIZE
;
1437 if ((rp
->cur_tx
- rp
->dirty_tx
) < TX_QUEUE_LEN
- 4)
1438 netif_wake_queue(dev
);
1440 spin_unlock(&rp
->lock
);
1443 /* Process up to limit frames from receive ring */
1444 static int rhine_rx(struct net_device
*dev
, int limit
)
1446 struct rhine_private
*rp
= netdev_priv(dev
);
1448 int entry
= rp
->cur_rx
% RX_RING_SIZE
;
1451 printk(KERN_DEBUG
"%s: rhine_rx(), entry %d status %8.8x.\n",
1453 le32_to_cpu(rp
->rx_head_desc
->rx_status
));
1456 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1457 for (count
= 0; count
< limit
; ++count
) {
1458 struct rx_desc
*desc
= rp
->rx_head_desc
;
1459 u32 desc_status
= le32_to_cpu(desc
->rx_status
);
1460 int data_size
= desc_status
>> 16;
1462 if (desc_status
& DescOwn
)
1466 printk(KERN_DEBUG
"rhine_rx() status is %8.8x.\n",
1469 if ((desc_status
& (RxWholePkt
| RxErr
)) != RxWholePkt
) {
1470 if ((desc_status
& RxWholePkt
) != RxWholePkt
) {
1471 printk(KERN_WARNING
"%s: Oversized Ethernet "
1472 "frame spanned multiple buffers, entry "
1473 "%#x length %d status %8.8x!\n",
1474 dev
->name
, entry
, data_size
,
1476 printk(KERN_WARNING
"%s: Oversized Ethernet "
1477 "frame %p vs %p.\n", dev
->name
,
1478 rp
->rx_head_desc
, &rp
->rx_ring
[entry
]);
1479 dev
->stats
.rx_length_errors
++;
1480 } else if (desc_status
& RxErr
) {
1481 /* There was a error. */
1483 printk(KERN_DEBUG
"rhine_rx() Rx "
1484 "error was %8.8x.\n",
1486 dev
->stats
.rx_errors
++;
1487 if (desc_status
& 0x0030)
1488 dev
->stats
.rx_length_errors
++;
1489 if (desc_status
& 0x0048)
1490 dev
->stats
.rx_fifo_errors
++;
1491 if (desc_status
& 0x0004)
1492 dev
->stats
.rx_frame_errors
++;
1493 if (desc_status
& 0x0002) {
1494 /* this can also be updated outside the interrupt handler */
1495 spin_lock(&rp
->lock
);
1496 dev
->stats
.rx_crc_errors
++;
1497 spin_unlock(&rp
->lock
);
1501 struct sk_buff
*skb
= NULL
;
1502 /* Length should omit the CRC */
1503 int pkt_len
= data_size
- 4;
1505 /* Check if the packet is long enough to accept without
1506 copying to a minimally-sized skbuff. */
1507 if (pkt_len
< rx_copybreak
)
1508 skb
= netdev_alloc_skb_ip_align(dev
, pkt_len
);
1510 pci_dma_sync_single_for_cpu(rp
->pdev
,
1511 rp
->rx_skbuff_dma
[entry
],
1513 PCI_DMA_FROMDEVICE
);
1515 skb_copy_to_linear_data(skb
,
1516 rp
->rx_skbuff
[entry
]->data
,
1518 skb_put(skb
, pkt_len
);
1519 pci_dma_sync_single_for_device(rp
->pdev
,
1520 rp
->rx_skbuff_dma
[entry
],
1522 PCI_DMA_FROMDEVICE
);
1524 skb
= rp
->rx_skbuff
[entry
];
1526 printk(KERN_ERR
"%s: Inconsistent Rx "
1527 "descriptor chain.\n",
1531 rp
->rx_skbuff
[entry
] = NULL
;
1532 skb_put(skb
, pkt_len
);
1533 pci_unmap_single(rp
->pdev
,
1534 rp
->rx_skbuff_dma
[entry
],
1536 PCI_DMA_FROMDEVICE
);
1538 skb
->protocol
= eth_type_trans(skb
, dev
);
1539 netif_receive_skb(skb
);
1540 dev
->stats
.rx_bytes
+= pkt_len
;
1541 dev
->stats
.rx_packets
++;
1543 entry
= (++rp
->cur_rx
) % RX_RING_SIZE
;
1544 rp
->rx_head_desc
= &rp
->rx_ring
[entry
];
1547 /* Refill the Rx ring buffers. */
1548 for (; rp
->cur_rx
- rp
->dirty_rx
> 0; rp
->dirty_rx
++) {
1549 struct sk_buff
*skb
;
1550 entry
= rp
->dirty_rx
% RX_RING_SIZE
;
1551 if (rp
->rx_skbuff
[entry
] == NULL
) {
1552 skb
= netdev_alloc_skb(dev
, rp
->rx_buf_sz
);
1553 rp
->rx_skbuff
[entry
] = skb
;
1555 break; /* Better luck next round. */
1556 skb
->dev
= dev
; /* Mark as being used by this device. */
1557 rp
->rx_skbuff_dma
[entry
] =
1558 pci_map_single(rp
->pdev
, skb
->data
,
1560 PCI_DMA_FROMDEVICE
);
1561 rp
->rx_ring
[entry
].addr
= cpu_to_le32(rp
->rx_skbuff_dma
[entry
]);
1563 rp
->rx_ring
[entry
].rx_status
= cpu_to_le32(DescOwn
);
1570 * Clears the "tally counters" for CRC errors and missed frames(?).
1571 * It has been reported that some chips need a write of 0 to clear
1572 * these, for others the counters are set to 1 when written to and
1573 * instead cleared when read. So we clear them both ways ...
1575 static inline void clear_tally_counters(void __iomem
*ioaddr
)
1577 iowrite32(0, ioaddr
+ RxMissed
);
1578 ioread16(ioaddr
+ RxCRCErrs
);
1579 ioread16(ioaddr
+ RxMissed
);
1582 static void rhine_restart_tx(struct net_device
*dev
) {
1583 struct rhine_private
*rp
= netdev_priv(dev
);
1584 void __iomem
*ioaddr
= rp
->base
;
1585 int entry
= rp
->dirty_tx
% TX_RING_SIZE
;
1589 * If new errors occured, we need to sort them out before doing Tx.
1590 * In that case the ISR will be back here RSN anyway.
1592 intr_status
= get_intr_status(dev
);
1594 if ((intr_status
& IntrTxErrSummary
) == 0) {
1596 /* We know better than the chip where it should continue. */
1597 iowrite32(rp
->tx_ring_dma
+ entry
* sizeof(struct tx_desc
),
1598 ioaddr
+ TxRingPtr
);
1600 iowrite8(ioread8(ioaddr
+ ChipCmd
) | CmdTxOn
,
1602 iowrite8(ioread8(ioaddr
+ ChipCmd1
) | Cmd1TxDemand
,
1607 /* This should never happen */
1609 printk(KERN_WARNING
"%s: rhine_restart_tx() "
1610 "Another error occured %8.8x.\n",
1611 dev
->name
, intr_status
);
1616 static void rhine_error(struct net_device
*dev
, int intr_status
)
1618 struct rhine_private
*rp
= netdev_priv(dev
);
1619 void __iomem
*ioaddr
= rp
->base
;
1621 spin_lock(&rp
->lock
);
1623 if (intr_status
& IntrLinkChange
)
1624 rhine_check_media(dev
, 0);
1625 if (intr_status
& IntrStatsMax
) {
1626 dev
->stats
.rx_crc_errors
+= ioread16(ioaddr
+ RxCRCErrs
);
1627 dev
->stats
.rx_missed_errors
+= ioread16(ioaddr
+ RxMissed
);
1628 clear_tally_counters(ioaddr
);
1630 if (intr_status
& IntrTxAborted
) {
1632 printk(KERN_INFO
"%s: Abort %8.8x, frame dropped.\n",
1633 dev
->name
, intr_status
);
1635 if (intr_status
& IntrTxUnderrun
) {
1636 if (rp
->tx_thresh
< 0xE0)
1637 iowrite8(rp
->tx_thresh
+= 0x20, ioaddr
+ TxConfig
);
1639 printk(KERN_INFO
"%s: Transmitter underrun, Tx "
1640 "threshold now %2.2x.\n",
1641 dev
->name
, rp
->tx_thresh
);
1643 if (intr_status
& IntrTxDescRace
) {
1645 printk(KERN_INFO
"%s: Tx descriptor write-back race.\n",
1648 if ((intr_status
& IntrTxError
) &&
1649 (intr_status
& (IntrTxAborted
|
1650 IntrTxUnderrun
| IntrTxDescRace
)) == 0) {
1651 if (rp
->tx_thresh
< 0xE0) {
1652 iowrite8(rp
->tx_thresh
+= 0x20, ioaddr
+ TxConfig
);
1655 printk(KERN_INFO
"%s: Unspecified error. Tx "
1656 "threshold now %2.2x.\n",
1657 dev
->name
, rp
->tx_thresh
);
1659 if (intr_status
& (IntrTxAborted
| IntrTxUnderrun
| IntrTxDescRace
|
1661 rhine_restart_tx(dev
);
1663 if (intr_status
& ~(IntrLinkChange
| IntrStatsMax
| IntrTxUnderrun
|
1664 IntrTxError
| IntrTxAborted
| IntrNormalSummary
|
1667 printk(KERN_ERR
"%s: Something Wicked happened! "
1668 "%8.8x.\n", dev
->name
, intr_status
);
1671 spin_unlock(&rp
->lock
);
1674 static struct net_device_stats
*rhine_get_stats(struct net_device
*dev
)
1676 struct rhine_private
*rp
= netdev_priv(dev
);
1677 void __iomem
*ioaddr
= rp
->base
;
1678 unsigned long flags
;
1680 spin_lock_irqsave(&rp
->lock
, flags
);
1681 dev
->stats
.rx_crc_errors
+= ioread16(ioaddr
+ RxCRCErrs
);
1682 dev
->stats
.rx_missed_errors
+= ioread16(ioaddr
+ RxMissed
);
1683 clear_tally_counters(ioaddr
);
1684 spin_unlock_irqrestore(&rp
->lock
, flags
);
1689 static void rhine_set_rx_mode(struct net_device
*dev
)
1691 struct rhine_private
*rp
= netdev_priv(dev
);
1692 void __iomem
*ioaddr
= rp
->base
;
1693 u32 mc_filter
[2]; /* Multicast hash filter */
1694 u8 rx_mode
; /* Note: 0x02=accept runt, 0x01=accept errs */
1696 if (dev
->flags
& IFF_PROMISC
) { /* Set promiscuous. */
1698 iowrite32(0xffffffff, ioaddr
+ MulticastFilter0
);
1699 iowrite32(0xffffffff, ioaddr
+ MulticastFilter1
);
1700 } else if ((dev
->mc_count
> multicast_filter_limit
) ||
1701 (dev
->flags
& IFF_ALLMULTI
)) {
1702 /* Too many to match, or accept all multicasts. */
1703 iowrite32(0xffffffff, ioaddr
+ MulticastFilter0
);
1704 iowrite32(0xffffffff, ioaddr
+ MulticastFilter1
);
1707 struct dev_mc_list
*mclist
;
1709 memset(mc_filter
, 0, sizeof(mc_filter
));
1710 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
1711 i
++, mclist
= mclist
->next
) {
1712 int bit_nr
= ether_crc(ETH_ALEN
, mclist
->dmi_addr
) >> 26;
1714 mc_filter
[bit_nr
>> 5] |= 1 << (bit_nr
& 31);
1716 iowrite32(mc_filter
[0], ioaddr
+ MulticastFilter0
);
1717 iowrite32(mc_filter
[1], ioaddr
+ MulticastFilter1
);
1720 iowrite8(rp
->rx_thresh
| rx_mode
, ioaddr
+ RxConfig
);
1723 static void netdev_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
1725 struct rhine_private
*rp
= netdev_priv(dev
);
1727 strcpy(info
->driver
, DRV_NAME
);
1728 strcpy(info
->version
, DRV_VERSION
);
1729 strcpy(info
->bus_info
, pci_name(rp
->pdev
));
1732 static int netdev_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1734 struct rhine_private
*rp
= netdev_priv(dev
);
1737 spin_lock_irq(&rp
->lock
);
1738 rc
= mii_ethtool_gset(&rp
->mii_if
, cmd
);
1739 spin_unlock_irq(&rp
->lock
);
1744 static int netdev_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1746 struct rhine_private
*rp
= netdev_priv(dev
);
1749 spin_lock_irq(&rp
->lock
);
1750 rc
= mii_ethtool_sset(&rp
->mii_if
, cmd
);
1751 spin_unlock_irq(&rp
->lock
);
1752 rhine_set_carrier(&rp
->mii_if
);
1757 static int netdev_nway_reset(struct net_device
*dev
)
1759 struct rhine_private
*rp
= netdev_priv(dev
);
1761 return mii_nway_restart(&rp
->mii_if
);
1764 static u32
netdev_get_link(struct net_device
*dev
)
1766 struct rhine_private
*rp
= netdev_priv(dev
);
1768 return mii_link_ok(&rp
->mii_if
);
1771 static u32
netdev_get_msglevel(struct net_device
*dev
)
1776 static void netdev_set_msglevel(struct net_device
*dev
, u32 value
)
1781 static void rhine_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1783 struct rhine_private
*rp
= netdev_priv(dev
);
1785 if (!(rp
->quirks
& rqWOL
))
1788 spin_lock_irq(&rp
->lock
);
1789 wol
->supported
= WAKE_PHY
| WAKE_MAGIC
|
1790 WAKE_UCAST
| WAKE_MCAST
| WAKE_BCAST
; /* Untested */
1791 wol
->wolopts
= rp
->wolopts
;
1792 spin_unlock_irq(&rp
->lock
);
1795 static int rhine_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1797 struct rhine_private
*rp
= netdev_priv(dev
);
1798 u32 support
= WAKE_PHY
| WAKE_MAGIC
|
1799 WAKE_UCAST
| WAKE_MCAST
| WAKE_BCAST
; /* Untested */
1801 if (!(rp
->quirks
& rqWOL
))
1804 if (wol
->wolopts
& ~support
)
1807 spin_lock_irq(&rp
->lock
);
1808 rp
->wolopts
= wol
->wolopts
;
1809 spin_unlock_irq(&rp
->lock
);
1814 static const struct ethtool_ops netdev_ethtool_ops
= {
1815 .get_drvinfo
= netdev_get_drvinfo
,
1816 .get_settings
= netdev_get_settings
,
1817 .set_settings
= netdev_set_settings
,
1818 .nway_reset
= netdev_nway_reset
,
1819 .get_link
= netdev_get_link
,
1820 .get_msglevel
= netdev_get_msglevel
,
1821 .set_msglevel
= netdev_set_msglevel
,
1822 .get_wol
= rhine_get_wol
,
1823 .set_wol
= rhine_set_wol
,
1826 static int netdev_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
1828 struct rhine_private
*rp
= netdev_priv(dev
);
1831 if (!netif_running(dev
))
1834 spin_lock_irq(&rp
->lock
);
1835 rc
= generic_mii_ioctl(&rp
->mii_if
, if_mii(rq
), cmd
, NULL
);
1836 spin_unlock_irq(&rp
->lock
);
1837 rhine_set_carrier(&rp
->mii_if
);
1842 static int rhine_close(struct net_device
*dev
)
1844 struct rhine_private
*rp
= netdev_priv(dev
);
1845 void __iomem
*ioaddr
= rp
->base
;
1847 napi_disable(&rp
->napi
);
1848 cancel_work_sync(&rp
->reset_task
);
1849 netif_stop_queue(dev
);
1851 spin_lock_irq(&rp
->lock
);
1854 printk(KERN_DEBUG
"%s: Shutting down ethercard, "
1855 "status was %4.4x.\n",
1856 dev
->name
, ioread16(ioaddr
+ ChipCmd
));
1858 /* Switch to loopback mode to avoid hardware races. */
1859 iowrite8(rp
->tx_thresh
| 0x02, ioaddr
+ TxConfig
);
1861 /* Disable interrupts by clearing the interrupt mask. */
1862 iowrite16(0x0000, ioaddr
+ IntrEnable
);
1864 /* Stop the chip's Tx and Rx processes. */
1865 iowrite16(CmdStop
, ioaddr
+ ChipCmd
);
1867 spin_unlock_irq(&rp
->lock
);
1869 free_irq(rp
->pdev
->irq
, dev
);
1878 static void __devexit
rhine_remove_one(struct pci_dev
*pdev
)
1880 struct net_device
*dev
= pci_get_drvdata(pdev
);
1881 struct rhine_private
*rp
= netdev_priv(dev
);
1883 unregister_netdev(dev
);
1885 pci_iounmap(pdev
, rp
->base
);
1886 pci_release_regions(pdev
);
1889 pci_disable_device(pdev
);
1890 pci_set_drvdata(pdev
, NULL
);
1893 static void rhine_shutdown (struct pci_dev
*pdev
)
1895 struct net_device
*dev
= pci_get_drvdata(pdev
);
1896 struct rhine_private
*rp
= netdev_priv(dev
);
1897 void __iomem
*ioaddr
= rp
->base
;
1899 if (!(rp
->quirks
& rqWOL
))
1900 return; /* Nothing to do for non-WOL adapters */
1902 rhine_power_init(dev
);
1904 /* Make sure we use pattern 0, 1 and not 4, 5 */
1905 if (rp
->quirks
& rq6patterns
)
1906 iowrite8(0x04, ioaddr
+ WOLcgClr
);
1908 if (rp
->wolopts
& WAKE_MAGIC
) {
1909 iowrite8(WOLmagic
, ioaddr
+ WOLcrSet
);
1911 * Turn EEPROM-controlled wake-up back on -- some hardware may
1912 * not cooperate otherwise.
1914 iowrite8(ioread8(ioaddr
+ ConfigA
) | 0x03, ioaddr
+ ConfigA
);
1917 if (rp
->wolopts
& (WAKE_BCAST
|WAKE_MCAST
))
1918 iowrite8(WOLbmcast
, ioaddr
+ WOLcgSet
);
1920 if (rp
->wolopts
& WAKE_PHY
)
1921 iowrite8(WOLlnkon
| WOLlnkoff
, ioaddr
+ WOLcrSet
);
1923 if (rp
->wolopts
& WAKE_UCAST
)
1924 iowrite8(WOLucast
, ioaddr
+ WOLcrSet
);
1927 /* Enable legacy WOL (for old motherboards) */
1928 iowrite8(0x01, ioaddr
+ PwcfgSet
);
1929 iowrite8(ioread8(ioaddr
+ StickyHW
) | 0x04, ioaddr
+ StickyHW
);
1932 /* Hit power state D3 (sleep) */
1934 iowrite8(ioread8(ioaddr
+ StickyHW
) | 0x03, ioaddr
+ StickyHW
);
1936 /* TODO: Check use of pci_enable_wake() */
1941 static int rhine_suspend(struct pci_dev
*pdev
, pm_message_t state
)
1943 struct net_device
*dev
= pci_get_drvdata(pdev
);
1944 struct rhine_private
*rp
= netdev_priv(dev
);
1945 unsigned long flags
;
1947 if (!netif_running(dev
))
1950 napi_disable(&rp
->napi
);
1952 netif_device_detach(dev
);
1953 pci_save_state(pdev
);
1955 spin_lock_irqsave(&rp
->lock
, flags
);
1956 rhine_shutdown(pdev
);
1957 spin_unlock_irqrestore(&rp
->lock
, flags
);
1959 free_irq(dev
->irq
, dev
);
1963 static int rhine_resume(struct pci_dev
*pdev
)
1965 struct net_device
*dev
= pci_get_drvdata(pdev
);
1966 struct rhine_private
*rp
= netdev_priv(dev
);
1967 unsigned long flags
;
1970 if (!netif_running(dev
))
1973 if (request_irq(dev
->irq
, rhine_interrupt
, IRQF_SHARED
, dev
->name
, dev
))
1974 printk(KERN_ERR
"via-rhine %s: request_irq failed\n", dev
->name
);
1976 ret
= pci_set_power_state(pdev
, PCI_D0
);
1978 printk(KERN_INFO
"%s: Entering power state D0 %s (%d).\n",
1979 dev
->name
, ret
? "failed" : "succeeded", ret
);
1981 pci_restore_state(pdev
);
1983 spin_lock_irqsave(&rp
->lock
, flags
);
1985 enable_mmio(rp
->pioaddr
, rp
->quirks
);
1987 rhine_power_init(dev
);
1992 init_registers(dev
);
1993 spin_unlock_irqrestore(&rp
->lock
, flags
);
1995 netif_device_attach(dev
);
1999 #endif /* CONFIG_PM */
2001 static struct pci_driver rhine_driver
= {
2003 .id_table
= rhine_pci_tbl
,
2004 .probe
= rhine_init_one
,
2005 .remove
= __devexit_p(rhine_remove_one
),
2007 .suspend
= rhine_suspend
,
2008 .resume
= rhine_resume
,
2009 #endif /* CONFIG_PM */
2010 .shutdown
= rhine_shutdown
,
2013 static struct dmi_system_id __initdata rhine_dmi_table
[] = {
2017 DMI_MATCH(DMI_BIOS_VENDOR
, "Award Software International, Inc."),
2018 DMI_MATCH(DMI_BIOS_VERSION
, "6.00 PG"),
2024 DMI_MATCH(DMI_BIOS_VENDOR
, "Phoenix Technologies, LTD"),
2025 DMI_MATCH(DMI_BIOS_VERSION
, "6.00 PG"),
2031 static int __init
rhine_init(void)
2033 /* when a module, this is printed whether or not devices are found in probe */
2037 if (dmi_check_system(rhine_dmi_table
)) {
2038 /* these BIOSes fail at PXE boot if chip is in D3 */
2040 printk(KERN_WARNING
"%s: Broken BIOS detected, avoid_D3 "
2045 printk(KERN_INFO
"%s: avoid_D3 set.\n", DRV_NAME
);
2047 return pci_register_driver(&rhine_driver
);
2051 static void __exit
rhine_cleanup(void)
2053 pci_unregister_driver(&rhine_driver
);
/* Module entry/exit points. */
module_init(rhine_init);
module_exit(rhine_cleanup);