/* winbond-840.c: A Linux PCI network adapter device driver. */
/*
	Written 1998-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/drivers.html

	Do not remove the copyright information.
	Do not change the version information unless an improvement has been made.
	Merely removing my name, as Compex has done in the past, does not count
	as an improvement.

	Changelog:
	* spin lock update, memory barriers, new style dma mappings
	  limit each tx buffer to < 1024 bytes
	  remove DescIntr from Rx descriptors (that's a Tx flag)
	  remove next pointer from Tx descriptors
	  synchronize tx_q_bytes
	  software reset in tx_timeout
			Copyright (C) 2000 Manfred Spraul
	* support for big endian descriptors
			Copyright (C) 2001 Manfred Spraul
	* ethtool support (jgarzik)
	* Replace some MII-related magic numbers with constants (jgarzik)

	TODO:
	* enable pci_power_off
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"winbond-840"
#define DRV_VERSION	"1.01-e"
#define DRV_RELDATE	"Sep-11-2006"

/* Automatically extracted configuration info:
probe-func: winbond840_probe
config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840

c-help-name: Winbond W89c840 PCI Ethernet support
c-help-symbol: CONFIG_WINBOND_840
c-help: This driver is for the Winbond W89c840 chip.  It also works with
c-help: the TX9882 chip on the Compex RL100-ATX board.
c-help: More specific information and updates are available from
c-help: http://www.scyld.com/network/drivers.html
*/

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
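
/* An added illustrative note: with a power-of-two ring size, the modulo
 * mentioned above really does become a mask, e.g. for a 16-entry ring
 * cur_tx % 16 == cur_tx & 15, so index 35 maps to slot 3.
 */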
#define TX_QUEUE_LEN	10		/* Limit ring entries actually used. */
#define TX_QUEUE_LEN_RESTART	5

#define TX_BUFLIMIT	(1024-128)

/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
   To avoid overflowing we don't queue again until we have room for a
   full-size packet. */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
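
/* Added arithmetic note: TX_BUG_FIFO_LIMIT works out to
 * 2048 - 1514 - 16 = 518 bytes.  start_tx() below stops the queue once
 * tx_q_bytes exceeds this, so the FIFO always has room for one more
 * maximum-sized (1514 byte) frame plus slack.
 */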

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>

#include "tulip.h"

#undef PKT_BUF_SZ			/* tulip.h also defines this */
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */

/* These identify the driver base version and may not be removed. */
static const char version[] __initconst =
	"v" DRV_VERSION " (2.4 port) " DRV_RELDATE "  Donald Becker <becker@scyld.com>\n"
	"  http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");

/*
				Theory of Operation

I. Board Compatibility

This driver is for the Winbond w89c840 chip.

II. Board-specific settings

None.

III. Driver operation

This chip is very similar to the Digital 21*4* "Tulip" family.  The first
twelve registers and the descriptor format are nearly identical.  Read a
Tulip manual for operational details.

A significant difference is that the multicast filter and station address are
stored in registers rather than loaded through a pseudo-transmit packet.

Unlike the Tulip, transmit buffers are limited to 1KB.  To transmit a
full-sized packet we must use both data buffers in a descriptor.  Thus the
driver uses ring mode where descriptors are implicitly sequential in memory,
rather than using the second descriptor address as a chain pointer to
subsequent descriptors.

IV. Notes

If you are going to almost clone a Tulip, why not go all the way and avoid
the need for a new driver?

V. References

http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
http://www.winbond.com.tw/

VI. Errata

A horrible bug exists in the transmit FIFO.  Apparently the chip doesn't
correctly detect a full FIFO, and queuing more than 2048 bytes may result in
silent data corruption.

Test with 'ping -s 10000' on a fast computer.

*/
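
/* A minimal sketch (illustrative only, not driver code) of the two-buffer
   split described above, mirroring the length packing that start_tx()
   performs: buffer1 carries at most TX_BUFLIMIT (896) bytes and the
   remainder goes in buffer2, packed into bits 21:11 of the length field. */
#if 0	/* example, not compiled */
static u32 example_tx_length(unsigned int pkt_len)
{
	if (pkt_len < TX_BUFLIMIT)
		return DescWholePkt | pkt_len;		/* fits in buffer1 alone */
	/* e.g. a 1514 byte frame becomes (618 << 11) | 896 */
	return DescWholePkt | ((pkt_len - TX_BUFLIMIT) << 11) | TX_BUFLIMIT;
}
#endif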

enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

static const struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

enum {
	netdev_res_size		= 128,	/* size of PCI BAR resource */
};

struct pci_id_info {
	const char *name;
	int drv_flags;		/* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] = {
	{ 				/* Sometimes a Level-One switch card. */
	  "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII },
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx },
	{ "Compex RL100-ATX",	CanHaveMII | HasBrokenTx },
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses.  See CONFIG_TULIP_MMIO in .config. */

/* Offsets to the Command and Status Registers, "CSRs".
   While similar to the Tulip, these registers are longword aligned.
   Note: It's not useful to define symbolic names for every register bit in
   the device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read. */
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,			/* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Tulip Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

#define MII_CNT    1 /* winbond only supports one MII */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t	rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t	tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;
	unsigned int tx_full;			/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;				/* MII device addresses. */
	unsigned char phys[MII_CNT];		/* MII device addresses, but only the first is used */
	u32 mii;
	struct mii_if_info mii_if;
	void __iomem *base_addr;
};

static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static int  update_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int  alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int  netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  netdev_close(struct net_device *dev);

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	void __iomem *ioaddr;

	i = pci_enable_device(pdev);
	if (i) return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("Device %s disabled due to DMA limitations\n",
			pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
	if (!ioaddr)
		goto err_out_free_res;

	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required! */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			dev_info(&dev->dev,
				 "ignoring user supplied media type %d",
				 option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
		 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				dev_info(&dev->dev,
					 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
					 np->mii, phy, mii_status,
					 np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (!phy_idx) {
			dev_warn(&dev->dev,
				 "MII PHY not found -- this device may not operate correctly\n");
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev(dev);
	return -ENODEV;
}

/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
   often serial bit streams generated by the host processor.
   The example below is for the common 93c46 EEPROM, 64 16 bit words. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated. */
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
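
/* Added worked example: reading word 5 builds
 * read_cmd = EE_ReadCmd | 5 = (6 << 6) | 5 = 0x185, which eeprom_read()
 * below shifts out MSB-first over eleven clocks (i = 10..0) before
 * clocking the 16 data bits back in.
 */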

static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}

/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details.

   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay(mdio_addr) ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite32(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}
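
/* Added note on the overall frame: a register access is the 32-one-bit
 * preamble generated above, then 16 command bits shifted out MSB-first;
 * for a read, mdio_read() below then clocks in 20 more bits (turnaround,
 * 16 data bits, and idle) and keeps the 16 data bits as the result.
 */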

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (location == 4 && phy_id == np->phys[0])
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);		/* Reset */

	netif_device_detach(dev);
	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		goto out_err;

	netdev_dbg(dev, "w89c840_open() irq %d\n", irq);

	if((i=alloc_ringdesc(dev)))
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	netdev_dbg(dev, "Done netdev_open()\n");

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = netdev_timer;		/* timer handler */
	add_timer(&np->timer);
	return 0;

out_err:
	netif_device_attach(dev);
	return i;
}

#define MII_DAVICOM_DM9101	0x0181b800

static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)
		return np->csr6;
	/* reread: the link status bit is sticky */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {
		if (netif_carrier_ok(dev)) {
			dev_info(&dev->dev,
				 "MII #%d reports no link. Disabling watchdog\n",
				 np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		dev_info(&dev->dev,
			 "MII #%d link is back. Enabling watchdog\n",
			 np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* If the link partner doesn't support autonegotiation
		 * the MII detects its abilities with the "parallel detection".
		 * Some MIIs update the LPA register to the result of the parallel
		 * detection, some don't.
		 * The Davicom PHY [at least 0181b800] doesn't.
		 * Instead bits 9 and 13 of the BMCR are updated to the result
		 * of the negotiation.
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		int negotiated;

		mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* remove fastether and fullduplex */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		dev_info(&dev->dev,
			 "Setting %dMBit-%s-duplex based on MII#%d\n",
			 fasteth ? 100 : 10, duplex ? "full" : "half",
			 np->phys[0]);
	return result;
}

#define RXTX_TIMEOUT	2000
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new==np->csr6)
		return;
	/* stop both Tx and Rx processes */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		t = (csr5 >> 17) & 0x07;
		if (t==0||t==1) {
			/* rx stopped */
			t = (csr5 >> 20) & 0x07;
			if (t==0||t==1)
				break;
		}
		limit--;
		if(!limit) {
			dev_info(&dev->dev,
				 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* and restart them with the new configuration */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
		   ioread32(ioaddr + IntrStatus),
		   ioread32(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}

static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

	/* Init all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
					np->rx_buf_sz,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwned;
	}

	np->cur_rx = 0;
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		  np->base_addr + TxRingPtr);
}

static void free_rxtx_rings(struct netdev_private *np)
{
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->rx_addr[i],
					 np->rx_skbuff[i]->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->tx_addr[i],
					 np->tx_skbuff[i]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);	/* skip length 4 u32 */
	i |= 0x02;	/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed>		0000 align to cache	0800 8 longwords
		4000	8 longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32 longwords		0400 4 longwords */
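
	/* Added decode of the two values used below, per the table above:
	 * 0xE000 = 0xC000 (32-longword cache alignment) + 0x2000 (32-longword
	 * bursts); 0x4800 = 0x4000 (8-longword alignment) + 0x0800 (8-longword
	 * bursts).
	 */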

#if defined (__i386__) && !defined(MODULE)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		pr_info("This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
	i |= 0x4800;
#else
	dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold;
	   Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;

	dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
		 ioread32(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", np->tx_ring[i].status);
		printk(KERN_CONT "\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */
	iowrite32(1, np->base_addr+PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_wake_queue(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	np->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	np->rx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			&np->ring_dma_addr);
	if(!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}

static void free_ringdesc(struct netdev_private *np)
{
	pci_free_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			np->rx_ring, np->ring_dma_addr);
}

static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if(entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwned:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwned is cleared.
	 * - If DescOwned is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwned;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;
	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
	    ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
		   np->cur_tx, entry);

	return NETDEV_TX_OK;
}

static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
			netdev_dbg(dev, "Transmit error, Tx status %08x\n",
				   tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
				   entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev,np->tx_addr[entry],
				 np->tx_skbuff[entry]->len,
				 PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
	    np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (RxIntr | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxNoBuf | TxIntr) &&
		    np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
				   TimerInt | TxDied))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			dev_warn(&dev->dev,
				 "Too much work at interrupt, status=0x%04x\n",
				 intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
		   ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
		   entry, np->rx_ring[entry].status);

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		netdev_dbg(dev, "  netdev_rx() status was %08x\n",
			   status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					dev_warn(&dev->dev,
						 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
						 np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				netdev_dbg(dev, "Receive error, Rx status %08x\n",
					   status);
				np->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			netdev_dbg(dev, "  netdev_rx() normal Rx pkt length %d status %x\n",
				   pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev,np->rx_addr[entry],
						 np->rx_skbuff[entry]->len,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			netdev_dbg(dev, "  Rx data %pM %pM %02x%02x %pI4\n",
				   &skb->data[0], &skb->data[6],
				   skb->data[12], skb->data[13],
				   &skb->data[14]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
							    skb->data,
							    np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		wmb();
		np->rx_ring[entry].status = DescOwned;
	}

	return 0;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
	if (intr_status == 0xffffffff)
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;
		/* Bump up the Tx threshold */
#if 0
		/* This causes lots of dropped packets,
		 * and under high load even tx_timeouts
		 */
		new = np->csr6 + 0x4000;
#else
		new = (np->csr6 >> 14)&0x7f;
		if (new < 64)
			new *= 2;
		else
			new = 127; /* load full packet before starting */
		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
		netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
		update_csr6(dev, new);
	}
	if (intr_status & RxDied) {		/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable other interrupts. */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	/* The chip only needs to report frames silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}

static u32 __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	u32 mc_filter[2];			/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
			| AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int filbit;

			filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
			filbit &= 0x3f;
			mc_filter[filbit >> 5] |= 1 << (filbit & 31);
		}
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	return rx_mode;
}
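
/* Illustrative helper (not part of the driver): which of the 64 filter bits
   a multicast address selects, mirroring the hashing in __set_rx_mode()
   above -- the top six bits of the Ethernet CRC, inverted.  Bit 5 of the
   result picks the filter word and bits 4:0 the bit within it. */
#if 0	/* example, not compiled */
static unsigned int example_hash_bit(const u8 *addr)
{
	return (ether_crc(ETH_ALEN, addr) >> 26) ^ 0x3F;	/* 0 .. 63 */
}
#endif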

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 rx_mode = __set_rx_mode(dev);
	spin_lock_irq(&np->lock);
	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
	spin_unlock_irq(&np->lock);
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
		/* Fall Through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irq(&np->lock);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		spin_unlock_irq(&np->lock);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		spin_lock_irq(&np->lock);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&np->lock);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netif_stop_queue(dev);

	netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
		   ioread32(ioaddr + IntrStatus),
		   ioread32(ioaddr + NetworkConfig));
	netdev_dbg(dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
		   np->cur_tx, np->dirty_tx,
		   np->cur_rx, np->dirty_rx);

	/* Stop the chip's Tx and Rx processes. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(np->pci_dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->tx_ring[i].length,
			       np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->rx_ring[i].length,
			       np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}

static void w840_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		pci_release_regions(pdev);
		pci_iounmap(pdev, np->base_addr);
		free_netdev(dev);
	}
}

/*
 * suspend/resume synchronization:
 * - open, close, do_ioctl:
 *	rtnl_lock, & netif_device_detach after the rtnl_unlock.
 * - get_stats:
 *	spin_lock_irq(np->lock), doesn't touch hw if not present
 * - start_xmit:
 *	synchronize_irq + netif_tx_disable;
 * - tx_timeout:
 *	netif_device_detach + netif_tx_disable;
 * - set_multicast_list
 *	netif_device_detach + netif_tx_disable;
 * - interrupt handler
 *	doesn't touch hw if not present, synchronize_irq waits for
 *	running instances of the interrupt handler.
 *
 * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6 & all functions that write IntrEnable check netif_device_present
 * before setting any bits.
 *
 * Detach must occur under spin_unlock_irq(), interrupts from a detached
 * device would cause an irq storm.
 */
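
/* An added sketch of the detach-under-lock rule above, as w840_suspend()
 * below applies it:
 *
 *	spin_lock_irq(&np->lock);
 *	netif_device_detach(dev);	   device now reads as "not present"
 *	update_csr6(dev, 0);		   stops Tx/Rx; detached forces csr6=0
 *	iowrite32(0, ioaddr + IntrEnable);
 *	spin_unlock_irq(&np->lock);
 */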

static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		iowrite32(0, ioaddr + IntrEnable);
		spin_unlock_irq(&np->lock);

		synchronize_irq(np->pci_dev->irq);
		netif_tx_disable(dev);

		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

		/* no more hardware accesses behind this line. */

		BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));

		/* pci_power_off(pdev, -1); */

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}

static int w840_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	int retval = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out; /* device not suspended */
	if (netif_running(dev)) {
		if ((retval = pci_enable_device(pdev))) {
			dev_err(&dev->dev,
				"pci_enable_device failed in resume\n");
			goto out;
		}
		spin_lock_irq(&np->lock);
		iowrite32(1, np->base_addr+PCIBusCfg);
		ioread32(np->base_addr+PCIBusCfg);
		udelay(1);

		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return retval;
}

static struct pci_driver w840_driver = {
	.name		= DRV_NAME,
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= w840_remove1,
	.suspend	= w840_suspend,
	.resume		= w840_resume,
};

static int __init w840_init(void)
{
	pr_info("%s", version);
	return pci_register_driver(&w840_driver);
}

static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}

module_init(w840_init);
module_exit(w840_exit);