/* winbond-840.c: A Linux PCI network adapter device driver. */
/*
	Written 1998-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210

	Support and updates available at
	http://www.scyld.com/network/drivers.html

	Do not remove the copyright information.
	Do not change the version information unless an improvement has been made.
	Merely removing my name, as Compex has done in the past, does not count
	as an improvement.

	* spin lock update, memory barriers, new style dma mappings
		limit each tx buffer to < 1024 bytes
		remove DescIntr from Rx descriptors (that's a Tx flag)
		remove next pointer from Tx descriptors
		synchronize tx_q_bytes
		software reset in tx_timeout
			Copyright (C) 2000 Manfred Spraul
	* support for big endian descriptors
			Copyright (C) 2001 Manfred Spraul
	* ethtool support (jgarzik)
	* Replace some MII-related magic numbers with constants (jgarzik)
	* enable pci_power_off
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"winbond-840"
#define DRV_VERSION	"1.01-e"
#define DRV_RELDATE	"Sep-11-2006"
/* Automatically extracted configuration info:
probe-func: winbond840_probe
config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840

c-help-name: Winbond W89c840 PCI Ethernet support
c-help-symbol: CONFIG_WINBOND_840
c-help: This driver is for the Winbond W89c840 chip.  It also works with
c-help: the TX9882 chip on the Compex RL100-ATX board.
c-help: More specific information and updates are available from
c-help: http://www.scyld.com/network/drivers.html
*/
/* The user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_QUEUE_LEN		10	/* Limit ring entries actually used. */
#define TX_QUEUE_LEN_RESTART	5

#define TX_BUFLIMIT	(1024-128)
/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
   To avoid overflowing we don't queue again until we have room for a
   full-size packet. */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
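/* For illustration: with the values above the driver throttles the queue once
   more than TX_FIFO_SIZE - 1514 - 16 = 518 bytes are outstanding, i.e. as soon
   as one more maximum-sized (1514 byte) frame might no longer fit in the 2048
   byte FIFO with a small margin to spare. */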
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */

#undef PKT_BUF_SZ			/* tulip.h also defines this */
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */
/* These identify the driver base version and may not be removed. */
static const char version[] __initconst =
	"v" DRV_VERSION " (2.4 port) "
	DRV_RELDATE "  Donald Becker <becker@scyld.com>\n"
	"  http://www.scyld.com/network/drivers.html\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
/*
I. Board Compatibility

This driver is for the Winbond w89c840 chip.

II. Board-specific settings

III. Driver operation

This chip is very similar to the Digital 21*4* "Tulip" family.  The first
twelve registers and the descriptor format are nearly identical.  Read a
Tulip manual for operational details.

A significant difference is that the multicast filter and station address are
stored in registers rather than loaded through a pseudo-transmit packet.

Unlike the Tulip, transmit buffers are limited to 1KB.  To transmit a
full-sized packet we must use both data buffers in a descriptor.  Thus the
driver uses ring mode where descriptors are implicitly sequential in memory,
rather than using the second descriptor address as a chain pointer to
subsequent descriptors.
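As an illustration (the numbers follow from TX_BUFLIMIT above, not from the
datasheet): a 1514 byte frame is posted with buffer1 covering the first 896
bytes and buffer2 the remaining 618 bytes, with both lengths packed into the
single length word of the descriptor; see start_tx() below.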
If you are going to almost clone a Tulip, why not go all the way and avoid
the need for a new driver?

http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
http://www.winbond.com.tw/

A horrible bug exists in the transmit FIFO.  Apparently the chip doesn't
correctly detect a full FIFO, and queuing more than 2048 bytes may result in
silent data corruption.

Test with 'ping -s 10000' on a fast computer.

*/
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
enum {
	netdev_res_size = 128,	/* size of PCI BAR resource */
};

struct pci_id_info {
	const char *name;
	int drv_flags;		/* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{				/* Sometime a Level-One switch card. */
	  "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII},
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx},
	{ "Compex RL100-ATX",	CanHaveMII | HasBrokenTx},
	{ }	/* terminate list. */
};
/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses.  See CONFIG_TULIP_MMIO in .config */

/* Offsets to the Command and Status Registers, "CSRs".
   While similar to the Tulip, these registers are longword aligned.
   Note: It's not useful to define symbolic names for every register bit in
   the device.  The names can only partially document the semantics, and they
   make the driver longer and more difficult to read. */
enum {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,			/* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the NetworkConfig register. */
enum {
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

enum {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};
/* The Tulip Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};
#define MII_CNT    1 /* winbond only supports one MII */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t	rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t	tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;
	unsigned int tx_full;			/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;				/* MII device addresses. */
	unsigned char phys[MII_CNT];		/* MII device addresses, but only the first is used */
	u32 mii;
	struct mii_if_info mii_if;
	void __iomem *base_addr;
};
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static int  update_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int  alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  netdev_close(struct net_device *dev);
static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
static int __devinit w840_probe1 (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	int chip_idx = ent->driver_data;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	void __iomem *ioaddr;

	i = pci_enable_device(pdev);

	pci_set_master(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("Device %s disabled due to DMA limitations\n",

	dev = alloc_etherdev(sizeof(*np));

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))

	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
		goto err_out_free_res;

	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required! */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	dev->base_addr = (unsigned long)ioaddr;

	np = netdev_priv(dev);
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	option = dev->mem_start;

	/* The lower four bits are the media type. */
		np->mii_if.full_duplex = 1;
			"ignoring user supplied media type %d",
	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
		goto err_out_cleardev;

	dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
		 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
					"MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
					np->mii, phy, mii_status,
					np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
			"MII PHY not found -- this device may not operate correctly\n");
	}

	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
	pci_release_regions(pdev);
}
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
   often serial bit streams generated by the host processor.
   The example below is for the common 93c46 EEPROM, 64 16 bit words. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but future 66MHz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated. */
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
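/* Worked example (for illustration only): eeprom_read(ioaddr, 5) builds
   read_cmd = EE_ReadCmd | 5 = (6 << 6) | 5 = 0x185.  Shifted out MSB-first
   over the 11 clocks in the loop below, that is two idle zeros, the always-set
   start bit, the "10" read opcode, and the six address bits 000101. */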
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}
/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33MHz PCI cycles. */
#define mdio_delay(mdio_addr) ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite32(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;
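	/* Illustrative breakdown of mii_cmd (an interpretation of the bit
	 * layout, not quoted from a datasheet): the constant 0xf6 supplies two
	 * extra preamble ones, the "01" start delimiter and the "10" read
	 * opcode; the 5-bit PHY address and 5-bit register number follow.
	 * E.g. phy_id 1, location MII_BMSR (1) gives mii_cmd = 0x3d821. */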
	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval >> 1) & 0xffff;
}
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
	int i;
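	/* Illustrative breakdown (an interpretation of the bit layout): the
	 * constant 0x5002 << 16 contributes the "01" start delimiter, the
	 * "01" write opcode and the "10" turnaround bits; the PHY address
	 * sits at bits 27-23, the register number at bits 22-18, and the
	 * 16-bit value in the low half.  All 32 bits are clocked out
	 * MSB-first by the loop below. */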
	if (location == 4 && phy_id == np->phys[0])
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);		/* Reset */

	netif_device_detach(dev);
	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);

	netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq);

	if((i=alloc_ringdesc(dev)))

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	netdev_dbg(dev, "Done netdev_open()\n");

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = netdev_timer;		/* timer handler */
	add_timer(&np->timer);

	netif_device_attach(dev);
}
#define MII_DAVICOM_DM9101	0x0181b800
static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)

	/* reread: the link status bit is sticky */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {
		if (netif_carrier_ok(dev)) {
				 "MII #%d reports no link. Disabling watchdog\n",
			netif_carrier_off(dev);

	if (!netif_carrier_ok(dev)) {
			 "MII #%d link is back. Enabling watchdog\n",
		netif_carrier_on(dev);

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* If the link partner doesn't support autonegotiation
		 * the MII detects its abilities with the "parallel detection".
		 * Some MIIs update the LPA register to the result of the parallel
		 * detection, some don't.
		 * The Davicom PHY [at least 0181b800] doesn't.
		 * Instead bits 9 and 13 of the BMCR are updated to the result
		 * of the negotiation.
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;

		mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;

	duplex |= np->mii_if.force_media;
	/* remove fastether and fullduplex */
	result = np->csr6 & ~0x20000200;

		result |= 0x20000000;
	if (result != np->csr6 && debug)
		 "Setting %dMBit-%s-duplex based on MII#%d\n",
		 fasteth ? 100 : 10, duplex ? "full" : "half",
#define RXTX_TIMEOUT	2000
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))

	/* stop both Tx and Rx processes */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
	int csr5 = ioread32(ioaddr + IntrStatus);
	int t;

	t = (csr5 >> 17) & 0x07;
		t = (csr5 >> 20) & 0x07;
			 "couldn't stop rxtx, IntrStatus %xh\n", csr5);

	/* and restart them with the new configuration */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
		np->mii_if.full_duplex = 1;
}
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
		   ioread32(ioaddr + IntrStatus),
		   ioread32(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}
static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc *)&np->rx_ring[RX_RING_SIZE];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;

		np->rx_addr[i] = pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwned;
	}

	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		np->base_addr + TxRingPtr);
}
static void free_rxtx_rings(struct netdev_private *np)
{
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->rx_addr[i],
					 np->rx_skbuff[i]->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->tx_addr[i],
					 np->tx_skbuff[i]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	i = (1<<20);		/* Big-endian descriptors */
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed>		0000 align to cache	0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32 longwords		0400 4 longwords */
#if defined (__i386__) && !defined(MODULE)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		pr_info("This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
#else
#warning Processor architecture undefined
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	/* 128 byte Tx threshold;
	   Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);
}
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
		 ioread32(ioaddr + IntrStatus));

	printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
	for (i = 0; i < RX_RING_SIZE; i++)
		printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
	printk(KERN_CONT "\n");
	printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
	for (i = 0; i < TX_RING_SIZE; i++)
		printk(KERN_CONT " %08x", np->tx_ring[i].status);
	printk(KERN_CONT "\n");

	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));

	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */
	iowrite32(1, np->base_addr+PCIBusCfg);

	init_rxtx_rings(dev);

	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	netif_wake_queue(dev);
	dev->trans_start = jiffies; /* prevent tx timeout */
	np->stats.tx_errors++;
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	np->rx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			&np->ring_dma_addr);

	init_rxtx_rings(dev);
	return 0;
}
static void free_ringdesc(struct netdev_private *np)
{
	pci_free_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			np->rx_ring, np->ring_dma_addr);
}
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data, skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if(entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwned:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwned is cleared.
	 * - If DescOwned is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwned;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;
	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
	    ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
	}
	spin_unlock_irq(&np->lock);

	netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",

	return NETDEV_TX_OK;
}
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
			netdev_dbg(dev, "Transmit error, Tx status %08x\n",
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev, np->tx_addr[entry],
				 np->tx_skbuff[entry]->len,
				 PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
	    np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		netif_wake_queue(dev);
	}
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	if (!netif_device_present(dev))

	u32 intr_status = ioread32(ioaddr + IntrStatus);

	/* Acknowledge all of the current interrupt sources ASAP. */
	iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

	netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);

	if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)

	if (intr_status & (RxIntr | RxNoBuf))

	if (intr_status & RxNoBuf)
		iowrite32(0, ioaddr + RxStartDemand);

	if (intr_status & (TxNoBuf | TxIntr) &&
	    np->cur_tx != np->dirty_tx) {
		spin_lock(&np->lock);
		netdev_tx_done(dev);
		spin_unlock(&np->lock);
	}

	/* Abnormal error summary/uncommon events handlers. */
	if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
		netdev_error(dev, intr_status);

	if (--work_limit < 0) {
			 "Too much work at interrupt, status=0x%04x\n",
		/* Set the timer to re-enable the other interrupts after ... */
		spin_lock(&np->lock);
		if (netif_device_present(dev)) {
			iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
			iowrite32(10, ioaddr + GPTimer);
		}
		spin_unlock(&np->lock);
	}

	netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
		   ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
		   entry, np->rx_ring[entry].status);

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		netdev_dbg(dev, "  netdev_rx() status was %08x\n",

		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					"Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
					np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				netdev_dbg(dev, "Receive error, Rx status %08x\n",
				np->stats.rx_errors++; /* end of a packet. */
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			netdev_dbg(dev, "  netdev_rx() normal Rx pkt length %d status %x\n",
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev, np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev, np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev, np->rx_addr[entry],
						 np->rx_skbuff[entry]->len,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			netdev_dbg(dev, "  Rx data %pM %pM %02x%02x %pI4\n",
				   &skb->data[0], &skb->data[6],
				   skb->data[12], skb->data[13],
#endif
			skb->protocol = eth_type_trans(skb, dev);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
						skb->data,
						np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		np->rx_ring[entry].status = DescOwned;
	}
}
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
	if (intr_status == 0xffffffff)

	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;
		/* Bump up the Tx threshold */
		/* This causes lots of dropped packets,
		 * and under high load even tx_timeouts
		 */
		new = np->csr6 + 0x4000;

		new = (np->csr6 >> 14)&0x7f;

		new = 127; /* load full packet before starting */
		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);

		netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
		update_csr6(dev, new);
	}
	if (intr_status & RxDied) {		/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable other interrupts. */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}
static u32 __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	u32 mc_filter[2];			/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;
		int filbit;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
			mc_filter[filbit >> 5] |= 1 << (filbit & 31);
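			/* Worked example (illustrative): if the CRC of an
			 * address has 0x2D in its top six bits, XOR with 0x3F
			 * gives filbit = 0x12, so bit 18 of the 64-bit filter
			 * is set: mc_filter[0] |= 1 << 18. */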
		}
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	return rx_mode;
}
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 rx_mode = __set_rx_mode(dev);

	spin_lock_irq(&np->lock);
	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
	spin_unlock_irq(&np->lock);
}
static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}
static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)

static void netdev_set_msglevel(struct net_device *dev, u32 value)
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irq(&np->lock);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		spin_unlock_irq(&np->lock);

	case SIOCSMIIREG:		/* Write MII PHY register. */
		spin_lock_irq(&np->lock);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&np->lock);
	}
}
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	netif_stop_queue(dev);

	netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
		   ioread32(ioaddr + IntrStatus),
		   ioread32(ioaddr + NetworkConfig));
	netdev_dbg(dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
		   np->cur_tx, np->dirty_tx,
		   np->cur_rx, np->dirty_rx);

	/* Stop the chip's Tx and Rx processes. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(dev->irq, dev);

	netif_device_attach(dev);

	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
	for (i = 0; i < TX_RING_SIZE; i++)
		printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
		       i, np->tx_ring[i].length,
		       np->tx_ring[i].status, np->tx_ring[i].buffer1);
	printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
	for (i = 0; i < RX_RING_SIZE; i++) {
		printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
		       i, np->rx_ring[i].length,
		       np->rx_ring[i].status, np->rx_ring[i].buffer1);
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);

	return 0;
}
static void __devexit w840_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);

	unregister_netdev(dev);
	pci_release_regions(pdev);
	pci_iounmap(pdev, np->base_addr);

	pci_set_drvdata(pdev, NULL);
}
/*
 * suspend/resume synchronization:
 * - open, close, do_ioctl:
 *	rtnl_lock, & netif_device_detach after the rtnl_unlock.
 *	spin_lock_irq(np->lock), doesn't touch hw if not present
 *	synchronize_irq + netif_tx_disable;
 *	netif_device_detach + netif_tx_disable;
 * - set_multicast_list
 *	netif_device_detach + netif_tx_disable;
 * - interrupt handler
 *	doesn't touch hw if not present, synchronize_irq waits for
 *	running instances of the interrupt handler.
 *
 * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6 & all functions that write IntrEnable check netif_device_present
 * before setting any bits.
 *
 * Detach must occur under spin_unlock_irq(), interrupts from a detached
 * device would cause an irq storm.
 */
static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (netif_running (dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		iowrite32(0, ioaddr + IntrEnable);
		spin_unlock_irq(&np->lock);

		synchronize_irq(dev->irq);
		netif_tx_disable(dev);

		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

		/* no more hardware accesses behind this line. */

		BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));

		/* pci_power_off(pdev, -1); */

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	return 0;
}
static int w840_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	int retval = 0;

	if (netif_device_present(dev))
		goto out;	/* device not suspended */
	if (netif_running(dev)) {
		if ((retval = pci_enable_device(pdev))) {
			"pci_enable_device failed in resume\n");
		}
		spin_lock_irq(&np->lock);
		iowrite32(1, np->base_addr+PCIBusCfg);
		ioread32(np->base_addr+PCIBusCfg);

		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	return retval;
}
static struct pci_driver w840_driver = {
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= __devexit_p(w840_remove1),
	.suspend	= w840_suspend,
	.resume		= w840_resume,
};
static int __init w840_init(void)
{
	return pci_register_driver(&w840_driver);
}

static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}

module_init(w840_init);
module_exit(w840_exit);