Linux 2.6.17.7
[linux/fpc-iii.git] / drivers / net / natsemi.c
blob90627756d6fa47bbe93ce363af1d6ced2307fba4
1 /* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
2 /*
3 Written/copyright 1999-2001 by Donald Becker.
4 Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
5 Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
6 Portions copyright 2004 Harald Welte <laforge@gnumonks.org>
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL. License for under other terms may be
14 available. Contact the original author for details.
16 The original author may be reached as becker@scyld.com, or at
17 Scyld Computing Corporation
18 410 Severn Ave., Suite 210
19 Annapolis MD 21403
21 Support information and updates available at
22 	http://www.scyld.com/network/natsemi.html
25 Linux kernel modifications:
27 Version 1.0.1:
28 - Spinlock fixes
29 - Bug fixes and better intr performance (Tjeerd)
30 Version 1.0.2:
31 - Now reads correct MAC address from eeprom
32 Version 1.0.3:
33 - Eliminate redundant priv->tx_full flag
34 - Call netif_start_queue from dev->tx_timeout
35 - wmb() in start_tx() to flush data
36 - Update Tx locking
37 - Clean up PCI enable (davej)
38 Version 1.0.4:
39 - Merge Donald Becker's natsemi.c version 1.07
40 Version 1.0.5:
41 - { fill me in }
42 Version 1.0.6:
43 * ethtool support (jgarzik)
44 * Proper initialization of the card (which sometimes
45 fails to occur and leaves the card in a non-functional
46 state). (uzi)
48 * Some documented register settings to optimize some
49 of the 100Mbit autodetection circuitry in rev C cards. (uzi)
51 * Polling of the PHY intr for stuff like link state
52 change and auto- negotiation to finally work properly. (uzi)
54 * One-liner removal of a duplicate declaration of
55 netdev_error(). (uzi)
57 Version 1.0.7: (Manfred Spraul)
58 * pci dma
59 * SMP locking update
60 * full reset added into tx_timeout
61 * correct multicast hash generation (both big and little endian)
62 [copied from a natsemi driver version
63 from Myrio Corporation, Greg Smith]
64 * suspend/resume
66 version 1.0.8 (Tim Hockin <thockin@sun.com>)
67 * ETHTOOL_* support
68 * Wake on lan support (Erik Gilling)
69 * MXDMA fixes for serverworks
70 * EEPROM reload
72 version 1.0.9 (Manfred Spraul)
73 * Main change: fix lack of synchronize
74 netif_close/netif_suspend against a last interrupt
75 or packet.
76 	* do not enable superfluous interrupts (e.g. the
77 	  driver relies on TxDone - TxIntr not needed)
78 * wait that the hardware has really stopped in close
79 and suspend.
80 * workaround for the (at least) gcc-2.95.1 compiler
81 problem. Also simplifies the code a bit.
82 * disable_irq() in tx_timeout - needed to protect
83 against rx interrupts.
84 * stop the nic before switching into silent rx mode
85 for wol (required according to docu).
87 version 1.0.10:
88 * use long for ee_addr (various)
89 * print pointers properly (DaveM)
90 * include asm/irq.h (?)
92 version 1.0.11:
93 * check and reset if PHY errors appear (Adrian Sun)
94 * WoL cleanup (Tim Hockin)
95 * Magic number cleanup (Tim Hockin)
96 * Don't reload EEPROM on every reset (Tim Hockin)
97 * Save and restore EEPROM state across reset (Tim Hockin)
98 * MDIO Cleanup (Tim Hockin)
99 * Reformat register offsets/bits (jgarzik)
101 version 1.0.12:
102 * ETHTOOL_* further support (Tim Hockin)
104 version 1.0.13:
105 * ETHTOOL_[G]EEPROM support (Tim Hockin)
107 version 1.0.13:
108 * crc cleanup (Matt Domsch <Matt_Domsch@dell.com>)
110 version 1.0.14:
111 * Cleanup some messages and autoneg in ethtool (Tim Hockin)
113 version 1.0.15:
114 * Get rid of cable_magic flag
115 * use new (National provided) solution for cable magic issue
117 version 1.0.16:
118 * call netdev_rx() for RxErrors (Manfred Spraul)
119 * formatting and cleanups
120 * change options and full_duplex arrays to be zero
121 initialized
122 * enable only the WoL and PHY interrupts in wol mode
124 version 1.0.17:
125 * only do cable_magic on 83815 and early 83816 (Tim Hockin)
126 * create a function for rx refill (Manfred Spraul)
127 * combine drain_ring and init_ring (Manfred Spraul)
128 * oom handling (Manfred Spraul)
129 * hands_off instead of playing with netif_device_{de,a}ttach
130 (Manfred Spraul)
131 * be sure to write the MAC back to the chip (Manfred Spraul)
132 * lengthen EEPROM timeout, and always warn about timeouts
133 (Manfred Spraul)
134 * comments update (Manfred)
135 * do the right thing on a phy-reset (Manfred and Tim)
137 TODO:
138 * big endian support with CFG:BEM instead of cpu_to_le32
141 #include <linux/config.h>
142 #include <linux/module.h>
143 #include <linux/kernel.h>
144 #include <linux/string.h>
145 #include <linux/timer.h>
146 #include <linux/errno.h>
147 #include <linux/ioport.h>
148 #include <linux/slab.h>
149 #include <linux/interrupt.h>
150 #include <linux/pci.h>
151 #include <linux/netdevice.h>
152 #include <linux/etherdevice.h>
153 #include <linux/skbuff.h>
154 #include <linux/init.h>
155 #include <linux/spinlock.h>
156 #include <linux/ethtool.h>
157 #include <linux/delay.h>
158 #include <linux/rtnetlink.h>
159 #include <linux/mii.h>
160 #include <linux/crc32.h>
161 #include <linux/bitops.h>
162 #include <linux/prefetch.h>
163 #include <asm/processor.h> /* Processor type for cache alignment. */
164 #include <asm/io.h>
165 #include <asm/irq.h>
166 #include <asm/uaccess.h>
168 #define DRV_NAME "natsemi"
169 #define DRV_VERSION "1.07+LK1.0.17"
170 #define DRV_RELDATE "Sep 27, 2002"
172 #define RX_OFFSET 2
174 /* Updated to recommendations in pci-skeleton v2.03. */
176 /* The user-configurable values.
177 These may be modified when a driver module is loaded.*/
179 #define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \
180 NETIF_MSG_LINK | \
181 NETIF_MSG_WOL | \
182 NETIF_MSG_RX_ERR | \
183 NETIF_MSG_TX_ERR)
184 static int debug = -1;
186 static int mtu;
188 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
189 This chip uses a 512 element hash table based on the Ethernet CRC. */
190 static const int multicast_filter_limit = 100;
192 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
193 Setting to > 1518 effectively disables this feature. */
194 static int rx_copybreak;
196 /* Used to pass the media type, etc.
197 Both 'options[]' and 'full_duplex[]' should exist for driver
198 interoperability.
199 The media type is usually passed in 'options[]'.
201 #define MAX_UNITS 8 /* More are supported, limit only on options */
202 static int options[MAX_UNITS];
203 static int full_duplex[MAX_UNITS];
205 /* Operational parameters that are set at compile time. */
207 /* Keep the ring sizes a power of two for compile efficiency.
208 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
209 Making the Tx ring too large decreases the effectiveness of channel
210 bonding and packet priority.
211 There are no ill effects from too-large receive rings. */
212 #define TX_RING_SIZE 16
213 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
214 #define RX_RING_SIZE 32
216 /* Operational parameters that usually are not changed. */
217 /* Time in jiffies before concluding the transmitter is hung. */
218 #define TX_TIMEOUT (2*HZ)
220 #define NATSEMI_HW_TIMEOUT 400
221 #define NATSEMI_TIMER_FREQ 3*HZ
222 #define NATSEMI_PG0_NREGS 64
223 #define NATSEMI_RFDR_NREGS 8
224 #define NATSEMI_PG1_NREGS 4
225 #define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
226 NATSEMI_PG1_NREGS)
227 #define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */
228 #define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))
229 #define NATSEMI_DEF_EEPROM_SIZE 24 /* 12 16-bit values */
231 /* Buffer sizes:
232 * The nic writes 32-bit values, even if the upper bytes of
233 * a 32-bit value are beyond the end of the buffer.
235 #define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */
236 #define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */
237 #define NATSEMI_LONGPKT 1518 /* limit for normal packets */
238 #define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */
240 /* These identify the driver base version and may not be removed. */
241 static const char version[] __devinitdata =
242 KERN_INFO DRV_NAME " dp8381x driver, version "
243 DRV_VERSION ", " DRV_RELDATE "\n"
244 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
245 KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
246 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
248 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
249 MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
250 MODULE_LICENSE("GPL");
252 module_param(mtu, int, 0);
253 module_param(debug, int, 0);
254 module_param(rx_copybreak, int, 0);
255 module_param_array(options, int, NULL, 0);
256 module_param_array(full_duplex, int, NULL, 0);
257 MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
258 MODULE_PARM_DESC(debug, "DP8381x default debug level");
259 MODULE_PARM_DESC(rx_copybreak,
260 "DP8381x copy breakpoint for copy-only-tiny-frames");
261 MODULE_PARM_DESC(options,
262 "DP8381x: Bits 0-3: media type, bit 17: full duplex");
263 MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
266 Theory of Operation
268 I. Board Compatibility
270 This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
271 It also works with other chips in the DP83810 series.
273 II. Board-specific settings
275 This driver requires the PCI interrupt line to be valid.
276 It honors the EEPROM-set values.
278 III. Driver operation
280 IIIa. Ring buffers
282 This driver uses two statically allocated fixed-size descriptor lists
283 formed into rings by a branch from the final descriptor to the beginning of
284 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
285 The NatSemi design uses a 'next descriptor' pointer that the driver forms
286 into a list.
288 IIIb/c. Transmit/Receive Structure
290 This driver uses a zero-copy receive and transmit scheme.
291 The driver allocates full frame size skbuffs for the Rx ring buffers at
292 open() time and passes the skb->data field to the chip as receive data
293 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
294 a fresh skbuff is allocated and the frame is copied to the new skbuff.
295 When the incoming frame is larger, the skbuff is passed directly up the
296 protocol stack. Buffers consumed this way are replaced by newly allocated
297 skbuffs in a later phase of receives.
299 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
300 using a full-sized skbuff for small frames vs. the copying costs of larger
301 frames. New boards are typically used in generously configured machines
302 and the underfilled buffers have negligible impact compared to the benefit of
303 a single allocation size, so the default value of zero results in never
304 copying packets. When copying is done, the cost is usually mitigated by using
305 a combined copy/checksum routine. Copying also preloads the cache, which is
306 most useful with small frames.
308 A subtle aspect of the operation is that unaligned buffers are not permitted
309 by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
310 longword aligned for further processing. On copies frames are put into the
311 skbuff at an offset of "+2", 16-byte aligning the IP header.
313 IIId. Synchronization
315 Most operations are synchronized on the np->lock irq spinlock, except the
316 performance critical codepaths:
318 The rx process only runs in the interrupt handler. Access from outside
319 the interrupt handler is only permitted after disable_irq().
321 The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
322 is set, then access is permitted under spin_lock_irq(&np->lock).
324 Thus configuration functions that want to access everything must call
325 disable_irq(dev->irq);
326 spin_lock_bh(dev->xmit_lock);
327 spin_lock_irq(&np->lock);
329 IV. Notes
331 NatSemi PCI network controllers are very uncommon.
333 IVb. References
335 http://www.scyld.com/expert/100mbps.html
336 http://www.scyld.com/expert/NWay.html
337 Datasheet is available from:
338 http://www.national.com/pf/DP/DP83815.html
340 IVc. Errata
342 None characterised.
347 enum pcistuff {
348 PCI_USES_IO = 0x01,
349 PCI_USES_MEM = 0x02,
350 PCI_USES_MASTER = 0x04,
351 PCI_ADDR0 = 0x08,
352 PCI_ADDR1 = 0x10,
355 /* MMIO operations required */
356 #define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
360 * Support for fibre connections on Am79C874:
361 * This phy needs a special setup when connected to a fibre cable.
362 * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
364 #define PHYID_AM79C874 0x0022561b
366 #define MII_MCTRL 0x15 /* mode control register */
367 #define MII_FX_SEL 0x0001 /* 100BASE-FX (fiber) */
368 #define MII_EN_SCRM 0x0004 /* enable scrambler (tp) */
371 /* array of board data directly indexed by pci_tbl[x].driver_data */
372 static const struct {
373 const char *name;
374 unsigned long flags;
375 } natsemi_pci_info[] __devinitdata = {
376 { "NatSemi DP8381[56]", PCI_IOTYPE },
379 static struct pci_device_id natsemi_pci_tbl[] = {
380 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_83815, PCI_ANY_ID, PCI_ANY_ID, },
381 { 0, },
383 MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
385 /* Offsets to the device registers.
386 Unlike software-only systems, device drivers interact with complex hardware.
387 It's not useful to define symbolic names for every register bit in the
388 device.
390 enum register_offsets {
391 ChipCmd = 0x00,
392 ChipConfig = 0x04,
393 EECtrl = 0x08,
394 PCIBusCfg = 0x0C,
395 IntrStatus = 0x10,
396 IntrMask = 0x14,
397 IntrEnable = 0x18,
398 IntrHoldoff = 0x1C, /* DP83816 only */
399 TxRingPtr = 0x20,
400 TxConfig = 0x24,
401 RxRingPtr = 0x30,
402 RxConfig = 0x34,
403 ClkRun = 0x3C,
404 WOLCmd = 0x40,
405 PauseCmd = 0x44,
406 RxFilterAddr = 0x48,
407 RxFilterData = 0x4C,
408 BootRomAddr = 0x50,
409 BootRomData = 0x54,
410 SiliconRev = 0x58,
411 StatsCtrl = 0x5C,
412 StatsData = 0x60,
413 RxPktErrs = 0x60,
414 RxMissed = 0x68,
415 RxCRCErrs = 0x64,
416 BasicControl = 0x80,
417 BasicStatus = 0x84,
418 AnegAdv = 0x90,
419 AnegPeer = 0x94,
420 PhyStatus = 0xC0,
421 MIntrCtrl = 0xC4,
422 MIntrStatus = 0xC8,
423 PhyCtrl = 0xE4,
425 /* These are from the spec, around page 78... on a separate table.
426 * The meaning of these registers depend on the value of PGSEL. */
427 PGSEL = 0xCC,
428 PMDCSR = 0xE4,
429 TSTDAT = 0xFC,
430 DSPCFG = 0xF4,
431 SDCFG = 0xF8
433 /* the values for the 'magic' registers above (PGSEL=1) */
434 #define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */
435 #define TSTDAT_VAL 0x0
436 #define DSPCFG_VAL 0x5040
437 #define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */
438 #define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */
439 #define DSPCFG_COEF 0x1000 /* see coefficient (in TSTDAT) bit in DSPCFG */
440 #define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */
442 /* misc PCI space registers */
443 enum pci_register_offsets {
444 PCIPM = 0x44,
447 enum ChipCmd_bits {
448 ChipReset = 0x100,
449 RxReset = 0x20,
450 TxReset = 0x10,
451 RxOff = 0x08,
452 RxOn = 0x04,
453 TxOff = 0x02,
454 TxOn = 0x01,
457 enum ChipConfig_bits {
458 CfgPhyDis = 0x200,
459 CfgPhyRst = 0x400,
460 CfgExtPhy = 0x1000,
461 CfgAnegEnable = 0x2000,
462 CfgAneg100 = 0x4000,
463 CfgAnegFull = 0x8000,
464 CfgAnegDone = 0x8000000,
465 CfgFullDuplex = 0x20000000,
466 CfgSpeed100 = 0x40000000,
467 CfgLink = 0x80000000,
470 enum EECtrl_bits {
471 EE_ShiftClk = 0x04,
472 EE_DataIn = 0x01,
473 EE_ChipSelect = 0x08,
474 EE_DataOut = 0x02,
475 MII_Data = 0x10,
476 MII_Write = 0x20,
477 MII_ShiftClk = 0x40,
480 enum PCIBusCfg_bits {
481 EepromReload = 0x4,
484 /* Bits in the interrupt status/mask registers. */
485 enum IntrStatus_bits {
486 IntrRxDone = 0x0001,
487 IntrRxIntr = 0x0002,
488 IntrRxErr = 0x0004,
489 IntrRxEarly = 0x0008,
490 IntrRxIdle = 0x0010,
491 IntrRxOverrun = 0x0020,
492 IntrTxDone = 0x0040,
493 IntrTxIntr = 0x0080,
494 IntrTxErr = 0x0100,
495 IntrTxIdle = 0x0200,
496 IntrTxUnderrun = 0x0400,
497 StatsMax = 0x0800,
498 SWInt = 0x1000,
499 WOLPkt = 0x2000,
500 LinkChange = 0x4000,
501 IntrHighBits = 0x8000,
502 RxStatusFIFOOver = 0x10000,
503 IntrPCIErr = 0xf00000,
504 RxResetDone = 0x1000000,
505 TxResetDone = 0x2000000,
506 IntrAbnormalSummary = 0xCD20,
510 * Default Interrupts:
511 * Rx OK, Rx Packet Error, Rx Overrun,
512 * Tx OK, Tx Packet Error, Tx Underrun,
513 * MIB Service, Phy Interrupt, High Bits,
514 * Rx Status FIFO overrun,
515 * Received Target Abort, Received Master Abort,
516 * Signalled System Error, Received Parity Error
518 #define DEFAULT_INTR 0x00f1cd65
520 enum TxConfig_bits {
521 TxDrthMask = 0x3f,
522 TxFlthMask = 0x3f00,
523 TxMxdmaMask = 0x700000,
524 TxMxdma_512 = 0x0,
525 TxMxdma_4 = 0x100000,
526 TxMxdma_8 = 0x200000,
527 TxMxdma_16 = 0x300000,
528 TxMxdma_32 = 0x400000,
529 TxMxdma_64 = 0x500000,
530 TxMxdma_128 = 0x600000,
531 TxMxdma_256 = 0x700000,
532 TxCollRetry = 0x800000,
533 TxAutoPad = 0x10000000,
534 TxMacLoop = 0x20000000,
535 TxHeartIgn = 0x40000000,
536 TxCarrierIgn = 0x80000000
540 * Tx Configuration:
541 * - 256 byte DMA burst length
542 * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
543 * - 64 bytes initial drain threshold (i.e. begin actual transmission
544 * when 64 byte are in the fifo)
545 * - on tx underruns, increase drain threshold by 64.
546 * - at most use a drain threshold of 1472 bytes: The sum of the fill
547 * threshold and the drain threshold must be less than 2016 bytes.
550 #define TX_FLTH_VAL ((512/32) << 8)
551 #define TX_DRTH_VAL_START (64/32)
552 #define TX_DRTH_VAL_INC 2
553 #define TX_DRTH_VAL_LIMIT (1472/32)
555 enum RxConfig_bits {
556 RxDrthMask = 0x3e,
557 RxMxdmaMask = 0x700000,
558 RxMxdma_512 = 0x0,
559 RxMxdma_4 = 0x100000,
560 RxMxdma_8 = 0x200000,
561 RxMxdma_16 = 0x300000,
562 RxMxdma_32 = 0x400000,
563 RxMxdma_64 = 0x500000,
564 RxMxdma_128 = 0x600000,
565 RxMxdma_256 = 0x700000,
566 RxAcceptLong = 0x8000000,
567 RxAcceptTx = 0x10000000,
568 RxAcceptRunt = 0x40000000,
569 RxAcceptErr = 0x80000000
571 #define RX_DRTH_VAL (128/8)
573 enum ClkRun_bits {
574 PMEEnable = 0x100,
575 PMEStatus = 0x8000,
578 enum WolCmd_bits {
579 WakePhy = 0x1,
580 WakeUnicast = 0x2,
581 WakeMulticast = 0x4,
582 WakeBroadcast = 0x8,
583 WakeArp = 0x10,
584 WakePMatch0 = 0x20,
585 WakePMatch1 = 0x40,
586 WakePMatch2 = 0x80,
587 WakePMatch3 = 0x100,
588 WakeMagic = 0x200,
589 WakeMagicSecure = 0x400,
590 SecureHack = 0x100000,
591 WokePhy = 0x400000,
592 WokeUnicast = 0x800000,
593 WokeMulticast = 0x1000000,
594 WokeBroadcast = 0x2000000,
595 WokeArp = 0x4000000,
596 WokePMatch0 = 0x8000000,
597 WokePMatch1 = 0x10000000,
598 WokePMatch2 = 0x20000000,
599 WokePMatch3 = 0x40000000,
600 WokeMagic = 0x80000000,
601 WakeOptsSummary = 0x7ff
604 enum RxFilterAddr_bits {
605 RFCRAddressMask = 0x3ff,
606 AcceptMulticast = 0x00200000,
607 AcceptMyPhys = 0x08000000,
608 AcceptAllPhys = 0x10000000,
609 AcceptAllMulticast = 0x20000000,
610 AcceptBroadcast = 0x40000000,
611 RxFilterEnable = 0x80000000
614 enum StatsCtrl_bits {
615 StatsWarn = 0x1,
616 StatsFreeze = 0x2,
617 StatsClear = 0x4,
618 StatsStrobe = 0x8,
621 enum MIntrCtrl_bits {
622 MICRIntEn = 0x2,
625 enum PhyCtrl_bits {
626 PhyAddrMask = 0x1f,
629 #define PHY_ADDR_NONE 32
630 #define PHY_ADDR_INTERNAL 1
632 /* values we might find in the silicon revision register */
633 #define SRR_DP83815_C 0x0302
634 #define SRR_DP83815_D 0x0403
635 #define SRR_DP83816_A4 0x0504
636 #define SRR_DP83816_A5 0x0505
638 /* The Rx and Tx buffer descriptors. */
639 /* Note that using only 32 bit fields simplifies conversion to big-endian
640 architectures. */
641 struct netdev_desc {
642 	u32 next_desc; /* bus address of the next descriptor; the driver links these into a ring */
643 	s32 cmd_status; /* command/status word, see desc_status_bits (DescOwn, DescPktOK, ...) */
644 	u32 addr; /* bus address of the packet buffer for this descriptor */
645 	u32 software_use; /* apparently driver scratch space, not used by the hardware -- TODO confirm against datasheet */
648 /* Bits in network_desc.status */
649 enum desc_status_bits {
650 DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
651 DescNoCRC=0x10000000, DescPktOK=0x08000000,
652 DescSizeMask=0xfff,
654 DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
655 DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
656 DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
657 DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,
659 DescRxAbort=0x04000000, DescRxOver=0x02000000,
660 DescRxDest=0x01800000, DescRxLong=0x00400000,
661 DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
662 DescRxCRC=0x00080000, DescRxAlign=0x00040000,
663 DescRxLoop=0x00020000, DesRxColl=0x00010000,
666 struct netdev_private {
667 /* Descriptor rings first for alignment */
668 dma_addr_t ring_dma;
669 struct netdev_desc *rx_ring;
670 struct netdev_desc *tx_ring;
671 /* The addresses of receive-in-place skbuffs */
672 struct sk_buff *rx_skbuff[RX_RING_SIZE];
673 dma_addr_t rx_dma[RX_RING_SIZE];
674 /* address of a sent-in-place packet/buffer, for later free() */
675 struct sk_buff *tx_skbuff[TX_RING_SIZE];
676 dma_addr_t tx_dma[TX_RING_SIZE];
677 struct net_device_stats stats;
678 /* Media monitoring timer */
679 struct timer_list timer;
680 /* Frequently used values: keep some adjacent for cache effect */
681 struct pci_dev *pci_dev;
682 struct netdev_desc *rx_head_desc;
683 /* Producer/consumer ring indices */
684 unsigned int cur_rx, dirty_rx;
685 unsigned int cur_tx, dirty_tx;
686 /* Based on MTU+slack. */
687 unsigned int rx_buf_sz;
688 int oom;
689 /* Interrupt status */
690 u32 intr_status;
691 /* Do not touch the nic registers */
692 int hands_off;
693 /* external phy that is used: only valid if dev->if_port != PORT_TP */
694 int mii;
695 int phy_addr_external;
696 unsigned int full_duplex;
697 /* Rx filter */
698 u32 cur_rx_mode;
699 u32 rx_filter[16];
700 /* FIFO and PCI burst thresholds */
701 u32 tx_config, rx_config;
702 /* original contents of ClkRun register */
703 u32 SavedClkRun;
704 /* silicon revision */
705 u32 srr;
706 /* expected DSPCFG value */
707 u16 dspcfg;
708 /* parms saved in ethtool format */
709 u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */
710 u8 duplex; /* Duplex, half or full */
711 u8 autoneg; /* Autonegotiation enabled */
712 /* MII transceiver section */
713 u16 advertising;
714 unsigned int iosize;
715 spinlock_t lock;
716 u32 msg_enable;
717 /* EEPROM data */
718 int eeprom_size;
721 static void move_int_phy(struct net_device *dev, int addr);
722 static int eeprom_read(void __iomem *ioaddr, int location);
723 static int mdio_read(struct net_device *dev, int reg);
724 static void mdio_write(struct net_device *dev, int reg, u16 data);
725 static void init_phy_fixup(struct net_device *dev);
726 static int miiport_read(struct net_device *dev, int phy_id, int reg);
727 static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
728 static int find_mii(struct net_device *dev);
729 static void natsemi_reset(struct net_device *dev);
730 static void natsemi_reload_eeprom(struct net_device *dev);
731 static void natsemi_stop_rxtx(struct net_device *dev);
732 static int netdev_open(struct net_device *dev);
733 static void do_cable_magic(struct net_device *dev);
734 static void undo_cable_magic(struct net_device *dev);
735 static void check_link(struct net_device *dev);
736 static void netdev_timer(unsigned long data);
737 static void dump_ring(struct net_device *dev);
738 static void tx_timeout(struct net_device *dev);
739 static int alloc_ring(struct net_device *dev);
740 static void refill_rx(struct net_device *dev);
741 static void init_ring(struct net_device *dev);
742 static void drain_tx(struct net_device *dev);
743 static void drain_ring(struct net_device *dev);
744 static void free_ring(struct net_device *dev);
745 static void reinit_ring(struct net_device *dev);
746 static void init_registers(struct net_device *dev);
747 static int start_tx(struct sk_buff *skb, struct net_device *dev);
748 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
749 static void netdev_error(struct net_device *dev, int intr_status);
750 static int natsemi_poll(struct net_device *dev, int *budget);
751 static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
752 static void netdev_tx_done(struct net_device *dev);
753 static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
754 #ifdef CONFIG_NET_POLL_CONTROLLER
755 static void natsemi_poll_controller(struct net_device *dev);
756 #endif
757 static void __set_rx_mode(struct net_device *dev);
758 static void set_rx_mode(struct net_device *dev);
759 static void __get_stats(struct net_device *dev);
760 static struct net_device_stats *get_stats(struct net_device *dev);
761 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
762 static int netdev_set_wol(struct net_device *dev, u32 newval);
763 static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
764 static int netdev_set_sopass(struct net_device *dev, u8 *newval);
765 static int netdev_get_sopass(struct net_device *dev, u8 *data);
766 static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
767 static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
768 static void enable_wol_mode(struct net_device *dev, int enable_intr);
769 static int netdev_close(struct net_device *dev);
770 static int netdev_get_regs(struct net_device *dev, u8 *buf);
771 static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
772 static struct ethtool_ops ethtool_ops;
774 static inline void __iomem *ns_ioaddr(struct net_device *dev)
776 return (void __iomem *) dev->base_addr;
779 static inline void natsemi_irq_enable(struct net_device *dev)
781 writel(1, ns_ioaddr(dev) + IntrEnable);
782 readl(ns_ioaddr(dev) + IntrEnable);
785 static inline void natsemi_irq_disable(struct net_device *dev)
787 writel(0, ns_ioaddr(dev) + IntrEnable);
788 readl(ns_ioaddr(dev) + IntrEnable);
791 static void move_int_phy(struct net_device *dev, int addr)
793 struct netdev_private *np = netdev_priv(dev);
794 void __iomem *ioaddr = ns_ioaddr(dev);
795 int target = 31;
798 * The internal phy is visible on the external mii bus. Therefore we must
799 * move it away before we can send commands to an external phy.
800 * There are two addresses we must avoid:
801 * - the address on the external phy that is used for transmission.
802 * - the address that we want to access. User space can access phys
803 * on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independant from the
804 * phy that is used for transmission.
807 if (target == addr)
808 target--;
809 if (target == np->phy_addr_external)
810 target--;
811 writew(target, ioaddr + PhyCtrl);
812 readw(ioaddr + PhyCtrl);
813 udelay(1);
816 static int __devinit natsemi_probe1 (struct pci_dev *pdev,
817 const struct pci_device_id *ent)
819 struct net_device *dev;
820 struct netdev_private *np;
821 int i, option, irq, chip_idx = ent->driver_data;
822 static int find_cnt = -1;
823 unsigned long iostart, iosize;
824 void __iomem *ioaddr;
825 const int pcibar = 1; /* PCI base address register */
826 int prev_eedata;
827 u32 tmp;
829 /* when built into the kernel, we only print version if device is found */
830 #ifndef MODULE
831 static int printed_version;
832 if (!printed_version++)
833 printk(version);
834 #endif
836 i = pci_enable_device(pdev);
837 if (i) return i;
839 /* natsemi has a non-standard PM control register
840 * in PCI config space. Some boards apparently need
841 * to be brought to D0 in this manner.
843 pci_read_config_dword(pdev, PCIPM, &tmp);
844 if (tmp & PCI_PM_CTRL_STATE_MASK) {
845 /* D0 state, disable PME assertion */
846 u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
847 pci_write_config_dword(pdev, PCIPM, newtmp);
850 find_cnt++;
851 iostart = pci_resource_start(pdev, pcibar);
852 iosize = pci_resource_len(pdev, pcibar);
853 irq = pdev->irq;
855 if (natsemi_pci_info[chip_idx].flags & PCI_USES_MASTER)
856 pci_set_master(pdev);
858 dev = alloc_etherdev(sizeof (struct netdev_private));
859 if (!dev)
860 return -ENOMEM;
861 SET_MODULE_OWNER(dev);
862 SET_NETDEV_DEV(dev, &pdev->dev);
864 i = pci_request_regions(pdev, DRV_NAME);
865 if (i)
866 goto err_pci_request_regions;
868 ioaddr = ioremap(iostart, iosize);
869 if (!ioaddr) {
870 i = -ENOMEM;
871 goto err_ioremap;
874 /* Work around the dropped serial bit. */
875 prev_eedata = eeprom_read(ioaddr, 6);
876 for (i = 0; i < 3; i++) {
877 int eedata = eeprom_read(ioaddr, i + 7);
878 dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
879 dev->dev_addr[i*2+1] = eedata >> 7;
880 prev_eedata = eedata;
883 dev->base_addr = (unsigned long __force) ioaddr;
884 dev->irq = irq;
886 np = netdev_priv(dev);
888 np->pci_dev = pdev;
889 pci_set_drvdata(pdev, dev);
890 np->iosize = iosize;
891 spin_lock_init(&np->lock);
892 np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
893 np->hands_off = 0;
894 np->intr_status = 0;
895 np->eeprom_size = NATSEMI_DEF_EEPROM_SIZE;
897 /* Initial port:
898 * - If the nic was configured to use an external phy and if find_mii
899 * finds a phy: use external port, first phy that replies.
900 * - Otherwise: internal port.
901 * Note that the phy address for the internal phy doesn't matter:
902 * The address would be used to access a phy over the mii bus, but
903 * the internal phy is accessed through mapped registers.
905 if (readl(ioaddr + ChipConfig) & CfgExtPhy)
906 dev->if_port = PORT_MII;
907 else
908 dev->if_port = PORT_TP;
909 /* Reset the chip to erase previous misconfiguration. */
910 natsemi_reload_eeprom(dev);
911 natsemi_reset(dev);
913 if (dev->if_port != PORT_TP) {
914 np->phy_addr_external = find_mii(dev);
915 if (np->phy_addr_external == PHY_ADDR_NONE) {
916 dev->if_port = PORT_TP;
917 np->phy_addr_external = PHY_ADDR_INTERNAL;
919 } else {
920 np->phy_addr_external = PHY_ADDR_INTERNAL;
923 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
924 if (dev->mem_start)
925 option = dev->mem_start;
927 /* The lower four bits are the media type. */
928 if (option) {
929 if (option & 0x200)
930 np->full_duplex = 1;
931 if (option & 15)
932 printk(KERN_INFO
933 "natsemi %s: ignoring user supplied media type %d",
934 pci_name(np->pci_dev), option & 15);
936 if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
937 np->full_duplex = 1;
939 /* The chip-specific entries in the device structure. */
940 dev->open = &netdev_open;
941 dev->hard_start_xmit = &start_tx;
942 dev->stop = &netdev_close;
943 dev->get_stats = &get_stats;
944 dev->set_multicast_list = &set_rx_mode;
945 dev->change_mtu = &natsemi_change_mtu;
946 dev->do_ioctl = &netdev_ioctl;
947 dev->tx_timeout = &tx_timeout;
948 dev->watchdog_timeo = TX_TIMEOUT;
949 dev->poll = natsemi_poll;
950 dev->weight = 64;
952 #ifdef CONFIG_NET_POLL_CONTROLLER
953 dev->poll_controller = &natsemi_poll_controller;
954 #endif
955 SET_ETHTOOL_OPS(dev, &ethtool_ops);
957 if (mtu)
958 dev->mtu = mtu;
960 netif_carrier_off(dev);
962 /* get the initial settings from hardware */
963 tmp = mdio_read(dev, MII_BMCR);
964 np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
965 np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
966 np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
967 np->advertising= mdio_read(dev, MII_ADVERTISE);
969 if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL
970 && netif_msg_probe(np)) {
971 printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
972 "10%s %s duplex.\n",
973 pci_name(np->pci_dev),
974 (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
975 "enabled, advertise" : "disabled, force",
976 (np->advertising &
977 (ADVERTISE_100FULL|ADVERTISE_100HALF))?
978 "0" : "",
979 (np->advertising &
980 (ADVERTISE_100FULL|ADVERTISE_10FULL))?
981 "full" : "half");
983 if (netif_msg_probe(np))
984 printk(KERN_INFO
985 "natsemi %s: Transceiver status %#04x advertising %#04x.\n",
986 pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
987 np->advertising);
989 /* save the silicon revision for later querying */
990 np->srr = readl(ioaddr + SiliconRev);
991 if (netif_msg_hw(np))
992 printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
993 pci_name(np->pci_dev), np->srr);
995 i = register_netdev(dev);
996 if (i)
997 goto err_register_netdev;
999 if (netif_msg_drv(np)) {
1000 printk(KERN_INFO "natsemi %s: %s at %#08lx (%s), ",
1001 dev->name, natsemi_pci_info[chip_idx].name, iostart,
1002 pci_name(np->pci_dev));
1003 for (i = 0; i < ETH_ALEN-1; i++)
1004 printk("%02x:", dev->dev_addr[i]);
1005 printk("%02x, IRQ %d", dev->dev_addr[i], irq);
1006 if (dev->if_port == PORT_TP)
1007 printk(", port TP.\n");
1008 else
1009 printk(", port MII, phy ad %d.\n", np->phy_addr_external);
1011 return 0;
1013 err_register_netdev:
1014 iounmap(ioaddr);
1016 err_ioremap:
1017 pci_release_regions(pdev);
1018 pci_set_drvdata(pdev, NULL);
1020 err_pci_request_regions:
1021 free_netdev(dev);
1022 return i;
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
   The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but future 66MHz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
#define eeprom_delay(ee_addr) readl(ee_addr)

/* Chip-select asserted with the serial data line held low / high. */
#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
/* Bit-bang one 16-bit word out of the serial EEPROM.
 * @addr: mapped register base of the chip
 * @location: word address, ORed with the read opcode below
 * Returns the 16-bit word, assembled LSB-first from EE_DataOut.
 */
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	/* Assert chip select with the data line low. */
	writel(EE_Write0, ee_addr);

	/* Shift the read command bits out, MSB first. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		/* bit is latched on the rising clock edge */
		writel(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* Clock in the 16 data bits driven by the EEPROM on EE_DataOut. */
	for (i = 0; i < 16; i++) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_Write0, ee_addr);
	writel(0, ee_addr);
	return retval;
}
/* MII transceiver control section.
 * The 83815 series has an internal transceiver, and we present the
 * internal management registers as if they were MII connected.
 * External Phy registers are referenced through the MII interface.
 */

/* clock transitions >= 20ns (25MHz)
 * One readl should be good to PCI @ 100MHz
 */
#define mii_delay(ioaddr) readl(ioaddr + EECtrl)
/* Clock one bit in from the MDIO line; returns 0 or 1. */
static int mii_getbit (struct net_device *dev)
{
	int data;
	void __iomem *ioaddr = ns_ioaddr(dev);

	writel(MII_ShiftClk, ioaddr + EECtrl);
	/* sample the data line while the clock is high */
	data = readl(ioaddr + EECtrl);
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
	return (data & MII_Data)? 1 : 0;
}
/* Shift the low @len bits of @data out on the MDIO line, MSB first. */
static void mii_send_bits (struct net_device *dev, u32 data, int len)
{
	u32 i;
	void __iomem *ioaddr = ns_ioaddr(dev);

	for (i = (1 << (len-1)); i; i >>= 1)
	{
		u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
		writel(mdio_val, ioaddr + EECtrl);
		mii_delay(ioaddr);
		/* the PHY latches the bit on the rising clock edge */
		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
		mii_delay(ioaddr);
	}
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
}
/* Read a 16-bit register from an external PHY over the bit-banged bus.
 * Returns 0 when no PHY drives the turnaround bit low (nothing present).
 */
static int miiport_read(struct net_device *dev, int phy_id, int reg)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Ensure sync */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10) | (phy_id << 5) | reg;
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		return 0;
	/* Read data, MSB first */
	for (i = 0; i < 16; i++) {
		retval <<= 1;
		retval |= mii_getbit (dev);
	}
	/* End cycle */
	mii_getbit (dev);
	return retval;
}
/* Write @data to register @reg of external PHY @phy_id over the MII bus. */
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
{
	u32 cmd;

	/* Ensure sync */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
}
/* Read an MII register: from the memory-mapped internal transceiver
 * registers when the TP port is selected, otherwise via the external bus.
 */
static int mdio_read(struct net_device *dev, int reg)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/* The 83815 series has two ports:
	 * - an internal transceiver
	 * - an external mii bus
	 */
	if (dev->if_port == PORT_TP)
		return readw(ioaddr+BasicControl+(reg<<2));
	else
		return miiport_read(dev, np->phy_addr_external, reg);
}
/* Write an MII register; counterpart of mdio_read(). */
static void mdio_write(struct net_device *dev, int reg, u16 data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/* The 83815 series has an internal transceiver; handle separately */
	if (dev->if_port == PORT_TP)
		writew(data, ioaddr+BasicControl+(reg<<2));
	else
		miiport_write(dev, np->phy_addr_external, reg, data);
}
/* Restore PHY configuration that is lost when the chip loses power:
 * re-arm autonegotiation (or the forced speed/duplex), apply per-PHY
 * quirks, and rewrite the NSC-recommended DSP tuning values for the
 * internal transceiver until they stick.
 */
static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	/* restore stuff lost when power was out */
	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		/* renegotiate if something changed */
		if ((tmp & BMCR_ANENABLE) == 0
		 || np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			/* turn on autonegotiation and force negotiation */
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		/* turn off auto negotiation, set speed and duplexity */
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
		/*
		 * Note: there is no good way to inform the link partner
		 * that our capabilities changed. The user has to unplug
		 * and replug the network cable after some changes, e.g.
		 * after switching from 10HD, autoneg off to 100 HD,
		 * autoneg off.
		 */
	}
	mdio_write(dev, MII_BMCR, tmp);
	readl(ioaddr + ChipConfig);	/* flush the write before the delay */
	udelay(1);

	/* find out what phy this is */
	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);

	/* handle external phys here */
	switch (np->mii) {
	case PHYID_AM79C874:
		/* phy specific configuration for fibre/tp operation */
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	cfg = readl(ioaddr + ChipConfig);
	/* the DSP tuning below applies only to the internal transceiver */
	if (cfg & CfgExtPhy)
		return;

	/* On page 78 of the spec, they recommend some settings for "optimum
	   performance" to be done in sequence.  These settings optimize some
	   of the 100Mbit autodetection circuitry.  They say we only want to
	   do this for rev C of the chip, but engineers at NSC (Bradley
	   Kennedy) recommends always setting them. If you don't, you get
	   errors on some autonegotiations that make the device unusable.

	   It seems that the DSP needs a few usec to reinitialize after
	   the start of the phy. Just retry writing these values until they
	   stick.
	 */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {

		int dspcfg;
		writew(1, ioaddr + PGSEL);
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		np->dspcfg = (np->srr <= SRR_DP83815_C)?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);
		readl(ioaddr + ChipConfig);
		udelay(10);

		/* read back and retry until the DSPCFG value sticks */
		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i==NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}
	/*
	 * Enable PHY Specific event based interrupts.  Link state change
	 * and Auto-Negotiation Completion are among the affected.
	 * Read the intr status to clear it (needed for wake events).
	 */
	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}
/* Select the external MII transceiver.
 * Returns 0 if it was already selected, 1 after actually switching.
 */
static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
				dev->name);
	}

	/* 1) switch back to external phy */
	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reset the external phy: */
	/* resetting the external PHY has been known to cause a hub supplying
	 * power over Ethernet to kill the power. We don't want to kill
	 * power to this computer, so we avoid resetting the phy.
	 */

	/* 3) reinit the phy fixup, it got lost during power down. */
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}
/* Select the internal transceiver and reset it.
 * Returns 0 if it was already selected, 1 after actually switching.
 */
static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg &CfgExtPhy))
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
				dev->name);
	}
	/* 1) switch back to internal phy: */
	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reset the internal phy: */
	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	/* wait for the PHY to clear the self-clearing BMCR_RESET bit */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}
	/* 3) reinit the phy fixup, it got lost during power down. */
	init_phy_fixup(dev);

	return 1;
}
/* Scan for a PHY on the external mii bus.
 * There are two tricky points:
 * - Do not scan while the internal phy is enabled. The internal phy will
 *   crash: e.g. reads from the DSPCFG register will return odd values and
 *   the nasty random phy reset code will reset the nic every few seconds.
 * - The internal phy must be moved around, an external phy could
 *   have the same address as the internal phy.
 *
 * Returns the address of the first responding external PHY, or 32 if
 * none answered (the caller compares the result against PHY_ADDR_NONE).
 */
static int find_mii(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int tmp;
	int i;
	int did_switch;

	/* Switch to external phy */
	did_switch = switch_port_external(dev);

	/* Scan the possible phy addresses:
	 *
	 * PHY address 0 means that the phy is in isolate mode. Not yet
	 * supported due to lack of test hardware. User space should
	 * handle it through ethtool.
	 */
	for (i = 1; i <= 31; i++) {
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		/* all-ones / all-zeroes means no device at this address */
		if (tmp != 0xffff && tmp != 0x0000) {
			/* found something! */
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
					+ mdio_read(dev, MII_PHYSID2);
			if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
						pci_name(np->pci_dev), np->mii, i);
			}
			break;
		}
	}
	/* And switch back to internal phy: */
	if (did_switch)
		switch_port_internal(dev);
	return i;
}
/* Masks of the register bits that must be preserved across a chip reset;
 * see natsemi_reset().  These correspond to the fields normally loaded
 * from the EEPROM on power-up.
 */
/* CFG bits [13:16] [18:23] */
#define CFG_RESET_SAVE 0xfde000
/* WCSR bits [0:4] [9:10] */
#define WCSR_RESET_SAVE 0x61f
/* RFCR bits [20] [22] [27:31] */
/* (fixed: the definition previously ended in a stray ';', which only
 * compiled by accident in statement-final uses and breaks any use of the
 * macro inside an expression) */
#define RFCR_RESET_SAVE 0xf8500000
/* Reset the chip while preserving the registers that would normally be
 * reloaded from the EEPROM.  Callers must follow up with init_registers().
 */
static void natsemi_reset(struct net_device *dev)
{
	int i;
	u32 cfg;
	u32 wcsr;
	u32 rfcr;
	u16 pmatch[3];
	u16 sopass[3];
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/*
	 * Resetting the chip causes some registers to be lost.
	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
	 * we save the state that would have been loaded from EEPROM
	 * on a normal power-up (see the spec EEPROM map).  This assumes
	 * whoever calls this will follow up with init_registers() eventually.
	 */

	/* CFG */
	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
	/* WCSR */
	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
	/* RFCR */
	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
	/* PMATCH: perfect-match filter words, read via RxFilterAddr window */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);
	}
	/* SOPAS: SecureOn password words */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);
	}

	/* now whack the chip */
	writel(ChipReset, ioaddr + ChipCmd);
	/* ChipReset is self-clearing; poll until the chip deasserts it */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		if (!(readl(ioaddr + ChipCmd) & ChipReset))
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
			dev->name, i*5);
	}

	/* restore CFG */
	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
	/* turn on external phy if it was selected */
	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
	else
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	/* restore WCSR */
	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);
	/* read RFCR (written back last, after the filter words below) */
	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
	/* restore PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	}
	/* restore SOPAS */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	}
	/* restore RFCR */
	writel(rfcr, ioaddr + RxFilterAddr);
}
1504 static void reset_rx(struct net_device *dev)
1506 int i;
1507 struct netdev_private *np = netdev_priv(dev);
1508 void __iomem *ioaddr = ns_ioaddr(dev);
1510 np->intr_status &= ~RxResetDone;
1512 writel(RxReset, ioaddr + ChipCmd);
1514 for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1515 np->intr_status |= readl(ioaddr + IntrStatus);
1516 if (np->intr_status & RxResetDone)
1517 break;
1518 udelay(15);
1520 if (i==NATSEMI_HW_TIMEOUT) {
1521 printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
1522 dev->name, i*15);
1523 } else if (netif_msg_hw(np)) {
1524 printk(KERN_WARNING "%s: RX reset took %d usec.\n",
1525 dev->name, i*15);
/* Trigger an EEPROM reload via PCIBusCfg and wait for the self-clearing
 * EepromReload bit to drop.
 */
static void natsemi_reload_eeprom(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;

	writel(EepromReload, ioaddr + PCIBusCfg);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		udelay(50);
		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
			break;
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	}
}
/* Command both DMA engines off and busy-wait until the chip reports that
 * the Tx and Rx processes have stopped.
 */
static void natsemi_stop_rxtx(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	int i;

	writel(RxOff | TxOff, ioaddr + ChipCmd);
	for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
			dev->name, i*5);
	}
}
/* dev->open hook: reset the chip, grab the (shared) IRQ, allocate and
 * initialize the descriptor rings, program the MAC address and rx filter,
 * start the queue, and arm the link-watchdog timer.
 * Returns 0 or a negative errno from request_irq()/alloc_ring().
 */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int i;

	/* Reset the chip, just in case. */
	natsemi_reset(dev);

	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (i) return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			dev->name, dev->irq);
	i = alloc_ring(dev);
	if (i < 0) {
		free_irq(dev->irq, dev);
		return i;
	}
	init_ring(dev);
	spin_lock_irq(&np->lock);
	init_registers(dev);
	/* now set the MAC address according to dev->dev_addr */
	for (i = 0; i < 3; i++) {
		/* each filter word holds two address bytes, low byte first */
		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

		writel(i*2, ioaddr + RxFilterAddr);
		writew(mac, ioaddr + RxFilterData);
	}
	writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + NATSEMI_TIMER_FREQ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer; /* timer handler */
	add_timer(&np->timer);

	return 0;
}
1620 static void do_cable_magic(struct net_device *dev)
1622 struct netdev_private *np = netdev_priv(dev);
1623 void __iomem *ioaddr = ns_ioaddr(dev);
1625 if (dev->if_port != PORT_TP)
1626 return;
1628 if (np->srr >= SRR_DP83816_A5)
1629 return;
1632 * 100 MBit links with short cables can trip an issue with the chip.
1633 * The problem manifests as lots of CRC errors and/or flickering
1634 * activity LED while idle. This process is based on instructions
1635 * from engineers at National.
1637 if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
1638 u16 data;
1640 writew(1, ioaddr + PGSEL);
1642 * coefficient visibility should already be enabled via
1643 * DSPCFG | 0x1000
1645 data = readw(ioaddr + TSTDAT) & 0xff;
1647 * the value must be negative, and within certain values
1648 * (these values all come from National)
1650 if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
1651 struct netdev_private *np = netdev_priv(dev);
1653 /* the bug has been triggered - fix the coefficient */
1654 writew(TSTDAT_FIXED, ioaddr + TSTDAT);
1655 /* lock the value */
1656 data = readw(ioaddr + DSPCFG);
1657 np->dspcfg = data | DSPCFG_LOCK;
1658 writew(np->dspcfg, ioaddr + DSPCFG);
1660 writew(0, ioaddr + PGSEL);
/* Undo do_cable_magic(): clear DSPCFG_LOCK so the DSP coefficient may
 * adapt again (TP port, pre-DP83816A5 silicon only).
 */
static void undo_cable_magic(struct net_device *dev)
{
	u16 data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)
		return;

	if (np->srr >= SRR_DP83816_A5)
		return;

	writew(1, ioaddr + PGSEL);
	/* make sure the lock bit is clear */
	data = readw(ioaddr + DSPCFG);
	np->dspcfg = data & ~DSPCFG_LOCK;
	writew(np->dspcfg, ioaddr + DSPCFG);
	writew(0, ioaddr + PGSEL);
}
/* Poll the PHY link status, update the carrier state, and reprogram the
 * duplex-dependent TX/RX config bits when the negotiated duplex changed.
 * Callers in this file hold np->lock.
 */
static void check_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int duplex;
	u16 bmsr;

	/* The link status field is latched: it remains low after a temporary
	 * link failure until it's read. We need the current link status,
	 * thus read twice.
	 */
	mdio_read(dev, MII_BMSR);
	bmsr = mdio_read(dev, MII_BMSR);

	if (!(bmsr & BMSR_LSTATUS)) {
		if (netif_carrier_ok(dev)) {
			if (netif_msg_link(np))
				printk(KERN_NOTICE "%s: link down.\n",
					dev->name);
			netif_carrier_off(dev);
			undo_cable_magic(dev);
		}
		return;
	}
	if (!netif_carrier_ok(dev)) {
		if (netif_msg_link(np))
			printk(KERN_NOTICE "%s: link up.\n", dev->name);
		netif_carrier_on(dev);
		do_cable_magic(dev);
	}

	/* determine duplex: forced by the user, negotiated, or forced in BMCR */
	duplex = np->full_duplex;
	if (!duplex) {
		if (bmsr & BMSR_ANEGCOMPLETE) {
			int tmp = mii_nway_result(
				np->advertising & mdio_read(dev, MII_LPA));
			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
				duplex = 1;
		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
			duplex = 1;
	}

	/* if duplex is set then bit 28 must be set, too */
	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
		if (netif_msg_link(np))
			printk(KERN_INFO
				"%s: Setting %s-duplex based on negotiated "
				"link capability.\n", dev->name,
				duplex ? "full" : "half");
		if (duplex) {
			np->rx_config |= RxAcceptTx;
			np->tx_config |= TxCarrierIgn | TxHeartIgn;
		} else {
			np->rx_config &= ~RxAcceptTx;
			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
		}
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
	}
}
/* Program the chip for operation: PHY fixup, ring pointers, TX/RX DMA and
 * FIFO thresholds, PME disable, rx filter, interrupt mask, then start the
 * TX/RX engines.  Callers in this file hold np->lock.
 */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	init_phy_fixup(dev);

	/* clear any interrupts that are pending, such as wake events */
	readl(ioaddr + IntrStatus);

	writel(np->ring_dma, ioaddr + RxRingPtr);
	/* the TX ring follows the RX ring inside the single DMA block */
	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
		ioaddr + TxRingPtr);

	/* Initialize other registers.
	 * Configure the PCI bus bursts and FIFO thresholds.
	 * Configure for standard, in-spec Ethernet.
	 * Start with half-duplex. check_link will update
	 * to the correct settings.
	 */

	/* DRTH: 2: start tx if 64 bytes are in the fifo
	 * FLTH: 0x10: refill with next packet if 512 bytes are free
	 * MXDMA: 0: up to 256 byte bursts.
	 * MXDMA must be <= FLTH
	 * ECRETRY=1
	 * ATP=1
	 */
	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
		TX_FLTH_VAL | TX_DRTH_VAL_START;
	writel(np->tx_config, ioaddr + TxConfig);

	/* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
	 * MXDMA 0: up to 256 byte bursts
	 */
	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
	/* if receive ring now has bigger buffers than normal, enable jumbo */
	if (np->rx_buf_sz > NATSEMI_LONGPKT)
		np->rx_config |= RxAcceptLong;

	writel(np->rx_config, ioaddr + RxConfig);

	/* Disable PME:
	 * The PME bit is initialized from the EEPROM contents.
	 * PCI cards probably have PME disabled, but motherboard
	 * implementations may have PME set to enable WakeOnLan.
	 * With PME set the chip will scan incoming packets but
	 * nothing will be written to memory. */
	np->SavedClkRun = readl(ioaddr + ClkRun);
	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
			dev->name, readl(ioaddr + WOLCmd));
	}

	check_link(dev);
	__set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writel(DEFAULT_INTR, ioaddr + IntrMask);
	writel(1, ioaddr + IntrEnable);

	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
}
/*
 * netdev_timer:
 * Purpose:
 * 1) check for link changes. Usually they are handled by the MII interrupt
 *    but it doesn't hurt to check twice.
 * 2) check for sudden death of the NIC:
 *    It seems that a reference set for this chip went out with incorrect info,
 *    and there exist boards that aren't quite right.  An unexpected voltage
 *    drop can cause the PHY to get itself in a weird state (basically reset).
 *    NOTE: this only seems to affect revC chips.
 * 3) check of death of the RX path due to OOM
 */
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int next_tick = 5*HZ;

	if (netif_msg_timer(np)) {
		/* DO NOT read the IntrStatus register,
		 * a read clears any pending interrupts.
		 */
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
			dev->name);
	}

	if (dev->if_port == PORT_TP) {
		u16 dspcfg;

		spin_lock_irq(&np->lock);
		/* check for a nasty random phy-reset - use dspcfg as a flag */
		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		if (dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				/* the PHY lost its setup: stop the engines,
				 * rebuild the rings and reprogram everything */
				spin_unlock_irq(&np->lock);
				if (netif_msg_hw(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				disable_irq(dev->irq);
				spin_lock_irq(&np->lock);
				natsemi_stop_rxtx(dev);
				dump_ring(dev);
				reinit_ring(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(dev->irq);
			} else {
				/* hurry back */
				next_tick = HZ;
				spin_unlock_irq(&np->lock);
			}
		} else {
			/* init_registers() calls check_link() for the above case */
			check_link(dev);
			spin_unlock_irq(&np->lock);
		}
	} else {
		spin_lock_irq(&np->lock);
		check_link(dev);
		spin_unlock_irq(&np->lock);
	}
	if (np->oom) {
		/* retry the RX-buffer allocation that failed earlier */
		disable_irq(dev->irq);
		np->oom = 0;
		refill_rx(dev);
		enable_irq(dev->irq);
		if (!np->oom) {
			writel(RxOn, ioaddr + ChipCmd);
		} else {
			next_tick = 1;
		}
	}
	mod_timer(&np->timer, jiffies + next_tick);
}
/* Log every TX and RX descriptor; active only with pktdata debugging. */
static void dump_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_pktdata(np)) {
		int i;
		printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->tx_ring[i].next_desc,
				np->tx_ring[i].cmd_status,
				np->tx_ring[i].addr);
		}
		printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->rx_ring[i].next_desc,
				np->rx_ring[i].cmd_status,
				np->rx_ring[i].addr);
		}
	}
}
/* dev->tx_timeout hook: after a transmit hang, reset the chip and rings
 * and reprogram the registers (unless another path owns the hardware via
 * np->hands_off), then count the error and wake the queue.
 */
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
			printk(KERN_WARNING
				"%s: Transmit timed out, status %#08x,"
				" resetting...\n",
				dev->name, readl(ioaddr + IntrStatus));
		dump_ring(dev);

		natsemi_reset(dev);
		reinit_ring(dev);
		init_registers(dev);
	} else {
		printk(KERN_WARNING
			"%s: tx_timeout while in hands_off state?\n",
			dev->name);
	}
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev);
}
1943 static int alloc_ring(struct net_device *dev)
1945 struct netdev_private *np = netdev_priv(dev);
1946 np->rx_ring = pci_alloc_consistent(np->pci_dev,
1947 sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
1948 &np->ring_dma);
1949 if (!np->rx_ring)
1950 return -ENOMEM;
1951 np->tx_ring = &np->rx_ring[RX_RING_SIZE];
1952 return 0;
/* Refill the Rx ring: allocate and DMA-map an skb for every empty slot
 * and hand the descriptor back to the chip.  Sets np->oom (retried from
 * netdev_timer) when allocation fails before the ring is full.
 */
static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
			skb = dev_alloc_skb(buflen);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break; /* Better luck next round. */
			skb->dev = dev; /* Mark as being used by this device. */
			np->rx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data, buflen, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		/* give the descriptor back to the chip (size, DescOwn clear) */
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}
	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
		if (netif_msg_rx_err(np))
			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
		np->oom = 1;
	}
}
1983 static void set_bufsize(struct net_device *dev)
1985 struct netdev_private *np = netdev_priv(dev);
1986 if (dev->mtu <= ETH_DATA_LEN)
1987 np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
1988 else
1989 np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* 1) TX ring */
	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		/* circular chain; TX descriptors sit after the RX ring
		 * inside the single DMA allocation (see alloc_ring) */
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}

	/* 2) RX ring */
	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->oom = 0;
	set_bufsize(dev);

	np->rx_head_desc = &np->rx_ring[0];

	/* Please be carefull before changing this loop - at least gcc-2.95.1
	 * miscompiles it otherwise.
	 */
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%RX_RING_SIZE));
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
	}
	refill_rx(dev);
	dump_ring(dev);
}
2031 static void drain_tx(struct net_device *dev)
2033 struct netdev_private *np = netdev_priv(dev);
2034 int i;
2036 for (i = 0; i < TX_RING_SIZE; i++) {
2037 if (np->tx_skbuff[i]) {
2038 pci_unmap_single(np->pci_dev,
2039 np->tx_dma[i], np->tx_skbuff[i]->len,
2040 PCI_DMA_TODEVICE);
2041 dev_kfree_skb(np->tx_skbuff[i]);
2042 np->stats.tx_dropped++;
2044 np->tx_skbuff[i] = NULL;
2048 static void drain_rx(struct net_device *dev)
2050 struct netdev_private *np = netdev_priv(dev);
2051 unsigned int buflen = np->rx_buf_sz;
2052 int i;
2054 /* Free all the skbuffs in the Rx queue. */
2055 for (i = 0; i < RX_RING_SIZE; i++) {
2056 np->rx_ring[i].cmd_status = 0;
2057 np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
2058 if (np->rx_skbuff[i]) {
2059 pci_unmap_single(np->pci_dev,
2060 np->rx_dma[i], buflen,
2061 PCI_DMA_FROMDEVICE);
2062 dev_kfree_skb(np->rx_skbuff[i]);
2064 np->rx_skbuff[i] = NULL;
/* Release every RX and TX buffer still attached to the rings. */
static void drain_ring(struct net_device *dev)
{
	drain_rx(dev);
	drain_tx(dev);
}
2074 static void free_ring(struct net_device *dev)
2076 struct netdev_private *np = netdev_priv(dev);
2077 pci_free_consistent(np->pci_dev,
2078 sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
2079 np->rx_ring, np->ring_dma);
/* Reset Rx bookkeeping, hand every descriptor back to the chip
 * (DescOwn), then repopulate buffers via refill_rx(). */
static void reinit_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* RX Ring */
	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->rx_head_desc = &np->rx_ring[0];
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);

	refill_rx(dev);
}
/* Reinitialize both rings: flush pending Tx skbs, zero the Tx
 * descriptors' status, then rebuild the Rx side. */
static void reinit_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* drain TX ring */
	drain_tx(dev);
	np->dirty_tx = np->cur_tx = 0;
	for (i=0;i<TX_RING_SIZE;i++)
		np->tx_ring[i].cmd_status = 0;

	reinit_rx(dev);
}
/* hard_start_xmit entry: queue one skb on the Tx ring and kick the
 * transmitter. Returns 0 in all cases (the packet is either queued or,
 * if a reset is in progress, dropped). */
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	unsigned entry;

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irq(&np->lock);

	if (!np->hands_off) {
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
		/* StrongARM: Explicitly cache flush np->tx_ring and
		 * skb->data,skb->len. */
		wmb();
		np->cur_tx++;
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			/* Ring nearly full: reap finished frames, and stop
			 * the queue if that did not make room. */
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}
		/* Wake the potentially-idle transmit channel. */
		writel(TxOn, ioaddr + ChipCmd);
	} else {
		/* hands_off: another path (reset/suspend) owns the chip;
		 * drop the packet rather than touch the hardware. */
		dev_kfree_skb_irq(skb);
		np->stats.tx_dropped++;
	}
	spin_unlock_irq(&np->lock);

	dev->trans_start = jiffies;

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}
/* Reap completed Tx descriptors (those the chip no longer owns),
 * update statistics, unmap and free the skbs, and wake the queue once
 * enough ring slots are free. Caller holds np->lock. */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		/* Stop at the first descriptor still owned by the chip. */
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
					dev->name, np->dirty_tx,
					le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			np->stats.tx_packets++;
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else { /* Various Tx errors */
			int tx_status =
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				np->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				np->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				np->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				np->stats.tx_window_errors++;
			np->stats.tx_errors++;
		}
		pci_unmap_single(np->pci_dev,np->tx_dma[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		/* Free the original skb. */
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	/* Wake with hysteresis (4 slots) to avoid start/stop thrashing. */
	if (netif_queue_stopped(dev)
		&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, wake queue. */
		netif_wake_queue(dev);
	}
}
/* The interrupt handler doesn't actually handle interrupts itself, it
 * schedules a NAPI poll if there is anything to do. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* hands_off: a reset/suspend path owns the chip; claim nothing. */
	if (np->hands_off)
		return IRQ_NONE;

	/* Reading automatically acknowledges. */
	np->intr_status = readl(ioaddr + IntrStatus);

	if (netif_msg_intr(np))
		printk(KERN_DEBUG
		       "%s: Interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	if (!np->intr_status)
		return IRQ_NONE;

	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

	if (netif_rx_schedule_prep(dev)) {
		/* Disable interrupts and register for poll */
		natsemi_irq_disable(dev);
		__netif_rx_schedule(dev);
	}
	return IRQ_HANDLED;
}
/* This is the NAPI poll routine. As well as the standard RX handling
 * it also handles all other interrupts that the chip might raise.
 * Returns 1 when the budget is exhausted (stay on the poll list),
 * 0 when all work is done and interrupts have been re-enabled. */
static int natsemi_poll(struct net_device *dev, int *budget)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	int work_to_do = min(*budget, dev->quota);
	int work_done = 0;

	do {
		if (np->intr_status &
		    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (np->intr_status & IntrAbnormalSummary)
			netdev_error(dev, np->intr_status);

		if (np->intr_status &
		    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
		     IntrRxErr | IntrRxOverrun)) {
			netdev_rx(dev, &work_done, work_to_do);
		}

		*budget -= work_done;
		dev->quota -= work_done;

		if (work_done >= work_to_do)
			return 1;

		/* Re-read (and thereby acknowledge) pending causes; loop
		 * until the chip reports nothing more to do. */
		np->intr_status = readl(ioaddr + IntrStatus);
	} while (np->intr_status);

	netif_rx_complete(dev);

	/* Reenable interrupts providing nothing is trying to shut
	 * the chip down. */
	spin_lock(&np->lock);
	if (!np->hands_off && netif_running(dev))
		natsemi_irq_enable(dev);
	spin_unlock(&np->lock);

	return 0;
}
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	unsigned int buflen = np->rx_buf_sz;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* If the driver owns the next entry it's a new packet. Send it up.
	 * desc_status < 0 tests the DescOwn (sign) bit. */
	while (desc_status < 0) { /* e.g. & DescOwn */
		int pkt_len;
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG
				" netdev_rx() entry %d status was %#08x.\n",
				entry, desc_status);
		/* boguscnt bounds the loop to the number of filled slots. */
		if (--boguscnt < 0)
			break;

		if (*work_done >= work_to_do)
			break;

		(*work_done)++;

		/* Frame length from the descriptor, minus the 4-byte CRC. */
		pkt_len = (desc_status & DescSizeMask) - 4;
		if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
			if (desc_status & DescMore) {
				if (netif_msg_rx_err(np))
					printk(KERN_WARNING
						"%s: Oversized(?) Ethernet "
						"frame spanned multiple "
						"buffers, entry %#08x "
						"status %#08x.\n", dev->name,
						np->cur_rx, desc_status);
				np->stats.rx_length_errors++;

				/* The RX state machine has probably
				 * locked up beneath us. Follow the
				 * reset procedure documented in
				 * AN-1287. */

				spin_lock_irq(&np->lock);
				reset_rx(dev);
				reinit_rx(dev);
				writel(np->ring_dma, ioaddr + RxRingPtr);
				check_link(dev);
				spin_unlock_irq(&np->lock);

				/* We'll enable RX on exit from this
				 * function. */
				break;

			} else {
				/* There was an error. */
				np->stats.rx_errors++;
				if (desc_status & (DescRxAbort|DescRxOver))
					np->stats.rx_over_errors++;
				if (desc_status & (DescRxLong|DescRxRunt))
					np->stats.rx_length_errors++;
				if (desc_status & (DescRxInvalid|DescRxAlign))
					np->stats.rx_frame_errors++;
				if (desc_status & DescRxCRC)
					np->stats.rx_crc_errors++;
			}
		} else if (pkt_len > np->rx_buf_sz) {
			/* if this is the tail of a double buffer
			 * packet, we've already counted the error
			 * on the first part. Ignore the second half.
			 */
		} else {
			struct sk_buff *skb;
			/* Omit CRC size. */
			/* Check if the packet is long enough to accept
			 * without copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
				skb->dev = dev;
				/* 16 byte align the IP header */
				skb_reserve(skb, RX_OFFSET);
				pci_dma_sync_single_for_cpu(np->pci_dev,
					np->rx_dma[entry],
					buflen,
					PCI_DMA_FROMDEVICE);
				eth_copy_and_sum(skb,
					np->rx_skbuff[entry]->data, pkt_len, 0);
				skb_put(skb, pkt_len);
				/* Hand the buffer back to the device. */
				pci_dma_sync_single_for_device(np->pci_dev,
					np->rx_dma[entry],
					buflen,
					PCI_DMA_FROMDEVICE);
			} else {
				/* Large packet: pass the ring skb up and
				 * leave the slot to be refilled later. */
				pci_unmap_single(np->pci_dev, np->rx_dma[entry],
					buflen, PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	}
	refill_rx(dev);

	/* Restart Rx engine if stopped. */
	if (np->oom)
		mod_timer(&np->timer, jiffies + 1);
	else
		writel(RxOn, ioaddr + ChipCmd);
}
/* Handle the "abnormal" interrupt causes: link changes, statistics
 * overflow, Tx underrun (adaptive drain threshold), WOL events, Rx
 * status FIFO overruns and PCI errors. Called from the poll loop. */
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	spin_lock(&np->lock);
	if (intr_status & LinkChange) {
		u16 lpa = mdio_read(dev, MII_LPA);
		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE
		 && netif_msg_link(np)) {
			printk(KERN_INFO
				"%s: Autonegotiation advertising"
				" %#04x partner %#04x.\n", dev->name,
				np->advertising, lpa);
		}

		/* read MII int status to clear the flag */
		readw(ioaddr + MIntrStatus);
		check_link(dev);
	}
	if (intr_status & StatsMax) {
		/* Hardware counters near overflow: fold them in now. */
		__get_stats(dev);
	}
	if (intr_status & IntrTxUnderrun) {
		/* Bump the Tx drain threshold until the FIFO keeps up,
		 * capped at TX_DRTH_VAL_LIMIT. */
		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
			np->tx_config += TX_DRTH_VAL_INC;
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: increased tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		} else {
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		}
		writel(np->tx_config, ioaddr + TxConfig);
	}
	if (intr_status & WOLPkt && netif_msg_wol(np)) {
		int wol_status = readl(ioaddr + WOLCmd);
		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
			dev->name, wol_status);
	}
	if (intr_status & RxStatusFIFOOver) {
		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
				dev->name);
		}
		np->stats.rx_fifo_errors++;
	}
	/* Hmmmmm, it's not clear how to recover from PCI faults. */
	if (intr_status & IntrPCIErr) {
		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
			intr_status & IntrPCIErr);
		np->stats.tx_fifo_errors++;
		np->stats.rx_fifo_errors++;
	}
	spin_unlock(&np->lock);
}
/* Fold the chip's clear-on-read error counters into np->stats.
 * Caller must hold np->lock (or otherwise serialize register access). */
static void __get_stats(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	/* The chip only need report frame silently dropped. */
	np->stats.rx_crc_errors	+= readl(ioaddr + RxCRCErrs);
	np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
}
/* dev->get_stats entry: refresh hardware counters (only when the chip
 * is running and not held off) and return the accumulated stats. */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* The chip only need report frame silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && !np->hands_off)
		__get_stats(dev);
	spin_unlock_irq(&np->lock);

	return &np->stats;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the interrupt handler with the IRQ line masked so
 * netconsole and friends work without interrupt delivery. */
static void natsemi_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	intr_handler(dev->irq, dev, NULL);
	enable_irq(dev->irq);
}
#endif
2495 #define HASH_TABLE 0x200
2496 static void __set_rx_mode(struct net_device *dev)
2498 void __iomem * ioaddr = ns_ioaddr(dev);
2499 struct netdev_private *np = netdev_priv(dev);
2500 u8 mc_filter[64]; /* Multicast hash filter */
2501 u32 rx_mode;
2503 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2504 /* Unconditionally log net taps. */
2505 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
2506 dev->name);
2507 rx_mode = RxFilterEnable | AcceptBroadcast
2508 | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
2509 } else if ((dev->mc_count > multicast_filter_limit)
2510 || (dev->flags & IFF_ALLMULTI)) {
2511 rx_mode = RxFilterEnable | AcceptBroadcast
2512 | AcceptAllMulticast | AcceptMyPhys;
2513 } else {
2514 struct dev_mc_list *mclist;
2515 int i;
2516 memset(mc_filter, 0, sizeof(mc_filter));
2517 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2518 i++, mclist = mclist->next) {
2519 int i = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
2520 mc_filter[i/8] |= (1 << (i & 0x07));
2522 rx_mode = RxFilterEnable | AcceptBroadcast
2523 | AcceptMulticast | AcceptMyPhys;
2524 for (i = 0; i < 64; i += 2) {
2525 writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
2526 writel((mc_filter[i + 1] << 8) + mc_filter[i],
2527 ioaddr + RxFilterData);
2530 writel(rx_mode, ioaddr + RxFilterAddr);
2531 np->cur_rx_mode = rx_mode;
/* Change the MTU. If the interface is up, the Rx side must be stopped,
 * drained and rebuilt with the new buffer size before restarting.
 * Returns -EINVAL for out-of-range values, 0 on success. */
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)
		return -EINVAL;

	dev->mtu = new_mtu;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		struct netdev_private *np = netdev_priv(dev);
		void __iomem * ioaddr = ns_ioaddr(dev);

		disable_irq(dev->irq);
		spin_lock(&np->lock);
		/* stop engines */
		natsemi_stop_rxtx(dev);
		/* drain rx queue */
		drain_rx(dev);
		/* change buffers */
		set_bufsize(dev);
		reinit_rx(dev);
		writel(np->ring_dma, ioaddr + RxRingPtr);
		/* restart engines */
		writel(RxOn | TxOn, ioaddr + ChipCmd);
		spin_unlock(&np->lock);
		enable_irq(dev->irq);
	}
	return 0;
}
/* dev->set_multicast_list entry: take the lock and reprogram the
 * receive filter unless a reset/suspend currently owns the chip. */
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	if (!np->hands_off)
		__set_rx_mode(dev);
	spin_unlock_irq(&np->lock);
}
2573 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2575 struct netdev_private *np = netdev_priv(dev);
2576 strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
2577 strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
2578 strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
/* ethtool: size in bytes of the register dump produced by get_regs(). */
static int get_regs_len(struct net_device *dev)
{
	return NATSEMI_REGS_SIZE;
}
/* ethtool: size in bytes of the on-board EEPROM (probed at init). */
static int get_eeprom_len(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->eeprom_size;
}
/* ethtool get_settings: snapshot link parameters under np->lock. */
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}
/* ethtool set_settings: apply new link parameters under np->lock.
 * Validation and hardware programming live in netdev_set_ecmd(). */
static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = netdev_set_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}
/* ethtool get_wol: report supported/enabled wake-up options and the
 * SecureOn password, all under np->lock. */
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_wol(dev, &wol->supported, &wol->wolopts);
	netdev_get_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
}
/* ethtool set_wol: program wake-up options and SecureOn password. */
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	netdev_set_wol(dev, wol->wolopts);
	res = netdev_set_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
	return res;
}
/* ethtool get_regs: dump chip registers into buf (see get_regs_len). */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct netdev_private *np = netdev_priv(dev);
	regs->version = NATSEMI_REGS_VER;
	spin_lock_irq(&np->lock);
	netdev_get_regs(dev, buf);
	spin_unlock_irq(&np->lock);
}
/* ethtool: current NETIF_MSG_* debug bitmask. */
static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}
/* ethtool: set the NETIF_MSG_* debug bitmask. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}
2652 static int nway_reset(struct net_device *dev)
2654 int tmp;
2655 int r = -EINVAL;
2656 /* if autoneg is off, it's an error */
2657 tmp = mdio_read(dev, MII_BMCR);
2658 if (tmp & BMCR_ANENABLE) {
2659 tmp |= (BMCR_ANRESTART);
2660 mdio_write(dev, MII_BMCR, tmp);
2661 r = 0;
2663 return r;
/* ethtool get_link: 1 if the PHY reports link up, else 0. */
static u32 get_link(struct net_device *dev)
{
	/* LSTATUS is latched low until a read - so read twice */
	mdio_read(dev, MII_BMSR);
	return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
}
/* ethtool get_eeprom: read the whole EEPROM into a temporary buffer
 * under np->lock, then copy out the requested window. */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	u8 *eebuf;
	int res;

	eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
	if (!eebuf)
		return -ENOMEM;

	eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
	spin_lock_irq(&np->lock);
	res = netdev_get_eeprom(dev, eebuf);
	spin_unlock_irq(&np->lock);
	if (!res)
		memcpy(data, eebuf+eeprom->offset, eeprom->len);
	kfree(eebuf);
	return res;
}
/* ethtool entry points for this driver. */
static struct ethtool_ops ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_regs_len = get_regs_len,
	.get_eeprom_len = get_eeprom_len,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_wol = get_wol,
	.set_wol = set_wol,
	.get_regs = get_regs,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_eeprom = get_eeprom,
};
/* Translate ethtool WAKE_* flags into the chip's WOLCmd bits and
 * program them. WAKE_MAGICSECURE is silently ignored on silicon older
 * than rev D, where it is broken. Caller holds np->lock. */
static int netdev_set_wol(struct net_device *dev, u32 newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;

	/* translate to bitmasks this chip understands */
	if (newval & WAKE_PHY)
		data |= WakePhy;
	if (newval & WAKE_UCAST)
		data |= WakeUnicast;
	if (newval & WAKE_MCAST)
		data |= WakeMulticast;
	if (newval & WAKE_BCAST)
		data |= WakeBroadcast;
	if (newval & WAKE_ARP)
		data |= WakeArp;
	if (newval & WAKE_MAGIC)
		data |= WakeMagic;
	if (np->srr >= SRR_DP83815_D) {
		if (newval & WAKE_MAGICSECURE) {
			data |= WakeMagicSecure;
		}
	}

	writel(data, ioaddr + WOLCmd);

	return 0;
}
/* Report supported and currently enabled wake-up options, translated
 * from the chip's WOLCmd bits back to ethtool WAKE_* flags. */
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 regval = readl(ioaddr + WOLCmd);

	*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
			| WAKE_ARP | WAKE_MAGIC);

	if (np->srr >= SRR_DP83815_D) {
		/* SOPASS works on revD and higher */
		*supported |= WAKE_MAGICSECURE;
	}
	*cur = 0;

	/* translate from chip bitmasks */
	if (regval & WakePhy)
		*cur |= WAKE_PHY;
	if (regval & WakeUnicast)
		*cur |= WAKE_UCAST;
	if (regval & WakeMulticast)
		*cur |= WAKE_MCAST;
	if (regval & WakeBroadcast)
		*cur |= WAKE_BCAST;
	if (regval & WakeArp)
		*cur |= WAKE_ARP;
	if (regval & WakeMagic)
		*cur |= WAKE_MAGIC;
	if (regval & WakeMagicSecure) {
		/* this can be on in revC, but it's broken */
		*cur |= WAKE_MAGICSECURE;
	}

	return 0;
}
/* Write the 6-byte SecureOn password into the (undocumented) RFCR
 * registers 0xa/0xc/0xe. No-op on silicon older than rev D.
 * NOTE(review): newval is accessed as three u16 words — assumes the
 * caller's sopass buffer is at least 2-byte aligned; confirm. */
static int netdev_set_sopass(struct net_device *dev, u8 *newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)newval;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		return 0;
	}

	/* enable writing to these registers by disabling the RX filter */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
	addr &= ~RxFilterEnable;
	writel(addr, ioaddr + RxFilterAddr);

	/* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
	writel(addr | 0xa, ioaddr + RxFilterAddr);
	writew(sval[0], ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	writew(sval[1], ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	writew(sval[2], ioaddr + RxFilterData);

	/* re-enable the RX filter */
	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);

	return 0;
}
/* Read the 6-byte SecureOn password back from RFCR 0xa/0xc/0xe.
 * Returns zeros on silicon older than rev D. */
static int netdev_get_sopass(struct net_device *dev, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)data;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		sval[0] = sval[1] = sval[2] = 0;
		return 0;
	}

	/* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;

	writel(addr | 0xa, ioaddr + RxFilterAddr);
	sval[0] = readw(ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	sval[1] = readw(ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	sval[2] = readw(ioaddr + RxFilterData);

	/* restore the previously selected RFCR address */
	writel(addr, ioaddr + RxFilterAddr);

	return 0;
}
/* Fill an ethtool_cmd from the driver's cached link parameters, the
 * active port type, and (when autonegotiating) the negotiated result.
 * Caller holds np->lock. */
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	ecmd->port        = dev->if_port;
	ecmd->speed       = np->speed;
	ecmd->duplex      = np->duplex;
	ecmd->autoneg     = np->autoneg;
	ecmd->advertising = 0;
	if (np->advertising & ADVERTISE_10HALF)
		ecmd->advertising |= ADVERTISED_10baseT_Half;
	if (np->advertising & ADVERTISE_10FULL)
		ecmd->advertising |= ADVERTISED_10baseT_Full;
	if (np->advertising & ADVERTISE_100HALF)
		ecmd->advertising |= ADVERTISED_100baseT_Half;
	if (np->advertising & ADVERTISE_100FULL)
		ecmd->advertising |= ADVERTISED_100baseT_Full;
	ecmd->supported   = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half  | SUPPORTED_10baseT_Full  |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
	ecmd->phy_address = np->phy_addr_external;
	/*
	 * We intentionally report the phy address of the external
	 * phy, even if the internal phy is used. This is necessary
	 * to work around a deficiency of the ethtool interface:
	 * It's only possible to query the settings of the active
	 * port. Therefore
	 * # ethtool -s ethX port mii
	 * actually sends an ioctl to switch to port mii with the
	 * settings that are used for the current active port.
	 * If we would report a different phy address in this
	 * command, then
	 * # ethtool -s ethX port tp;ethtool -s ethX port mii
	 * would unintentionally change the phy address.
	 *
	 * Fortunately the phy address doesn't matter with the
	 * internal phy...
	 */

	/* set information based on active port type */
	switch (ecmd->port) {
	default:
	case PORT_TP:
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->transceiver = XCVR_INTERNAL;
		break;
	case PORT_MII:
		ecmd->advertising |= ADVERTISED_MII;
		ecmd->transceiver = XCVR_EXTERNAL;
		break;
	case PORT_FIBRE:
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->transceiver = XCVR_EXTERNAL;
		break;
	}

	/* if autonegotiation is on, try to return the active speed/duplex */
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		tmp = mii_nway_result(
			np->advertising & mdio_read(dev, MII_LPA));
		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
			ecmd->speed  = SPEED_100;
		else
			ecmd->speed  = SPEED_10;
		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	}

	/* ignore maxtxpkt, maxrxpkt for now */

	return 0;
}
/* Validate and apply an ethtool_cmd: save the new port/autoneg/speed/
 * duplex settings, switch between internal and external PHY, then
 * reprogram the PHY and re-check the link. Caller holds np->lock.
 * Returns 0 on success or -EINVAL for inconsistent requests. */
static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);

	if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		/* At least one mode must be advertised. */
		if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
					  ADVERTISED_10baseT_Full |
					  ADVERTISED_100baseT_Half |
					  ADVERTISED_100baseT_Full)) == 0) {
			return -EINVAL;
		}
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	/*
	 * maxtxpkt, maxrxpkt: ignored for now.
	 *
	 * transceiver:
	 * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
	 * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
	 * selects based on ecmd->port.
	 *
	 * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
	 * phys that are connected to the mii bus. It's used to apply fibre
	 * specific updates.
	 */

	/* WHEW! now lets bang some bits */

	/* save the parms */
	dev->if_port          = ecmd->port;
	np->autoneg           = ecmd->autoneg;
	np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
	if (np->autoneg == AUTONEG_ENABLE) {
		/* advertise only what has been requested */
		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			np->advertising |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			np->advertising |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			np->advertising |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			np->advertising |= ADVERTISE_100FULL;
	} else {
		np->speed  = ecmd->speed;
		np->duplex = ecmd->duplex;
		/* user overriding the initial full duplex parm? */
		if (np->duplex == DUPLEX_HALF)
			np->full_duplex = 0;
	}

	/* get the right phy enabled */
	if (ecmd->port == PORT_TP)
		switch_port_internal(dev);
	else
		switch_port_external(dev);

	/* set parms and see how this affected our link status */
	init_phy_fixup(dev);
	check_link(dev);
	return 0;
}
/* Dump registers for ethtool get_regs: page-0 chip registers, current
 * MII registers, the 'magic' page-1 registers, and the RFCR-indexed
 * filter registers. Caller holds np->lock. */
static int netdev_get_regs(struct net_device *dev, u8 *buf)
{
	int i;
	int j;
	u32 rfcr;
	u32 *rbuf = (u32 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* read non-mii page 0 of registers */
	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
		rbuf[i] = readl(ioaddr + i*4);
	}

	/* read current mii registers */
	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
		rbuf[i] = mdio_read(dev, i & 0x1f);

	/* read only the 'magic' registers from page 1 */
	writew(1, ioaddr + PGSEL);
	rbuf[i++] = readw(ioaddr + PMDCSR);
	rbuf[i++] = readw(ioaddr + TSTDAT);
	rbuf[i++] = readw(ioaddr + DSPCFG);
	rbuf[i++] = readw(ioaddr + SDCFG);
	writew(0, ioaddr + PGSEL);

	/* read RFCR indexed registers */
	rfcr = readl(ioaddr + RxFilterAddr);
	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
		writel(j*2, ioaddr + RxFilterAddr);
		rbuf[i++] = readw(ioaddr + RxFilterData);
	}
	writel(rfcr, ioaddr + RxFilterAddr);

	/* the interrupt status is clear-on-read - see if we missed any */
	if (rbuf[4] & rbuf[5]) {
		printk(KERN_WARNING
			"%s: shoot, we dropped an interrupt (%#08x)\n",
			dev->name, rbuf[4] & rbuf[5]);
	}

	return 0;
}
/* Reverse the bit order of a 16-bit value (bit 0 <-> bit 15, etc.).
 * Terms are listed from the most significant source bit down; the
 * argument is evaluated once per term, so pass side-effect-free
 * expressions only. */
#define SWAP_BITS(x)	( (((x) & 0x8000) >> 15) | (((x) & 0x4000) >> 13) \
			| (((x) & 0x2000) >> 11) | (((x) & 0x1000) >> 9) \
			| (((x) & 0x0800) >> 7)  | (((x) & 0x0400) >> 5) \
			| (((x) & 0x0200) >> 3)  | (((x) & 0x0100) >> 1) \
			| (((x) & 0x0080) << 1)  | (((x) & 0x0040) << 3) \
			| (((x) & 0x0020) << 5)  | (((x) & 0x0010) << 7) \
			| (((x) & 0x0008) << 9)  | (((x) & 0x0004) << 11) \
			| (((x) & 0x0002) << 13) | (((x) & 0x0001) << 15) )
/* Read the whole EEPROM into buf for ethtool, restoring the device's
 * native bit order. Caller holds np->lock. */
static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
{
	int i;
	u16 *ebuf = (u16 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	/* eeprom_read reads 16 bits, and indexes by 16 bits */
	for (i = 0; i < np->eeprom_size/2; i++) {
		ebuf[i] = eeprom_read(ioaddr, i);
		/* The EEPROM itself stores data bit-swapped, but eeprom_read
		 * reads it back "sanely". So we swap it back here in order to
		 * present it to userland as it is stored. */
		ebuf[i] = SWAP_BITS(ebuf[i]);
	}
	return 0;
}
/* SIOCxMIIxxx ioctl handler: MII register access is routed either to
 * the internal PHY (TP port) or to the selected PHY on the external
 * MII bus, depending on the currently active port. */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
	case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
		data->phy_id = np->phy_addr_external;
		/* Fall Through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
	case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
		/* The phy_id is not enough to uniquely identify
		 * the intended target. Therefore the command is sent to
		 * the given mii on the current port.
		 */
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external)
				data->val_out = mdio_read(dev,
							data->reg_num & 0x1f);
			else
				data->val_out = 0;
		} else {
			move_int_phy(dev, data->phy_id & 0x1f);
			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
							data->reg_num & 0x1f);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
	case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				/* Track advertising changes so our cached
				 * copy stays in sync with the PHY. */
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
				mdio_write(dev, data->reg_num & 0x1f,
							data->val_in);
			}
		} else {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
			}
			move_int_phy(dev, data->phy_id & 0x1f);
			miiport_write(dev, data->phy_id & 0x1f,
						data->reg_num & 0x1f,
						data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
/* Put the chip into wake-on-lan mode: restart Rx in silent mode (NULL
 * ring pointer), arm PME, and optionally leave the WOL/LinkChange
 * interrupts enabled. The Rx process must already be stopped. */
static void enable_wol_mode(struct net_device *dev, int enable_intr)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_wol(np))
		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
			dev->name);

	/* For WOL we must restart the rx process in silent mode.
	 * Write NULL to the RxRingPtr. Only possible if
	 * rx process is stopped
	 */
	writel(0, ioaddr + RxRingPtr);

	/* read WoL status to clear */
	readl(ioaddr + WOLCmd);

	/* PME on, clear status */
	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

	/* and restart the rx process */
	writel(RxOn, ioaddr + ChipCmd);

	if (enable_intr) {
		/* enable the WOL interrupt.
		 * Could be used to send a netlink message.
		 */
		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
		writel(1, ioaddr + IntrEnable);
	}
}
/* dev->stop entry: quiesce the chip, release the IRQ, collect final
 * statistics, tear down the rings, then either enter WOL mode or
 * restore the saved PME state. */
static int netdev_close(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_ifdown(np))
		printk(KERN_DEBUG
			"%s: Shutting down ethercard, status was %#04x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));
	if (netif_msg_pktdata(np))
		printk(KERN_DEBUG
			"%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx,
			np->cur_rx, np->dirty_rx);

	/*
	 * FIXME: what if someone tries to close a device
	 * that is suspended?
	 * Should we reenable the nic to switch to
	 * the final WOL settings?
	 */

	del_timer_sync(&np->timer);
	/* hands_off keeps a racing interrupt from touching the chip
	 * between natsemi_irq_disable() and free_irq(). */
	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	natsemi_irq_disable(dev);
	np->hands_off = 1;
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	free_irq(dev->irq, dev);

	/* Interrupt disabled, interrupt handler released,
	 * queue stopped, timer deleted, rtnl_lock held
	 * All async codepaths that access the driver are disabled.
	 */
	spin_lock_irq(&np->lock);
	np->hands_off = 0;
	readl(ioaddr + IntrMask);
	readw(ioaddr + MIntrStatus);

	/* Freeze Stats */
	writel(StatsFreeze, ioaddr + StatsCtrl);

	/* Stop the chip's Tx and Rx processes. */
	natsemi_stop_rxtx(dev);

	__get_stats(dev);
	spin_unlock_irq(&np->lock);

	/* clear the carrier last - an interrupt could reenable it otherwise */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	dump_ring(dev);
	drain_ring(dev);
	free_ring(dev);

	{
		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
		if (wol) {
			/* restart the NIC in WOL mode.
			 * The nic must be stopped for this.
			 */
			enable_wol_mode(dev, 0);
		} else {
			/* Restore PME enable bit unmolested */
			writel(np->SavedClkRun, ioaddr + ClkRun);
		}
	}
	return 0;
}
3222 static void __devexit natsemi_remove1 (struct pci_dev *pdev)
3224 struct net_device *dev = pci_get_drvdata(pdev);
3225 void __iomem * ioaddr = ns_ioaddr(dev);
3227 unregister_netdev (dev);
3228 pci_release_regions (pdev);
3229 iounmap(ioaddr);
3230 free_netdev (dev);
3231 pci_set_drvdata(pdev, NULL);
#ifdef CONFIG_PM

/*
 * The ns83815 chip doesn't have explicit RxStop bits.
 * Kicking the Rx or Tx process for a new packet reenables the Rx process
 * of the nic, thus this function must be very careful:
 *
 * suspend/resume synchronization:
 * entry points:
 *   netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
 *   start_tx, tx_timeout
 *
 * No function accesses the hardware without checking np->hands_off.
 *	the check occurs under spin_lock_irq(&np->lock);
 * exceptions:
 *	* netdev_ioctl: noncritical access.
 *	* netdev_open: cannot happen due to the device_detach
 *	* netdev_close: doesn't hurt.
 *	* netdev_timer: timer stopped by natsemi_suspend.
 *	* intr_handler: doesn't acquire the spinlock. suspend calls
 *		disable_irq() to enforce synchronization.
 *	* natsemi_poll: checks before reenabling interrupts.  suspend
 *		sets hands_off, disables interrupts and then waits with
 *		netif_poll_disable().
 *
 * Interrupts must be disabled, otherwise hands_off can cause irq storms.
 */
3262 static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
3264 struct net_device *dev = pci_get_drvdata (pdev);
3265 struct netdev_private *np = netdev_priv(dev);
3266 void __iomem * ioaddr = ns_ioaddr(dev);
3268 rtnl_lock();
3269 if (netif_running (dev)) {
3270 del_timer_sync(&np->timer);
3272 disable_irq(dev->irq);
3273 spin_lock_irq(&np->lock);
3275 writel(0, ioaddr + IntrEnable);
3276 np->hands_off = 1;
3277 natsemi_stop_rxtx(dev);
3278 netif_stop_queue(dev);
3280 spin_unlock_irq(&np->lock);
3281 enable_irq(dev->irq);
3283 netif_poll_disable(dev);
3285 /* Update the error counts. */
3286 __get_stats(dev);
3288 /* pci_power_off(pdev, -1); */
3289 drain_ring(dev);
3291 u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
3292 /* Restore PME enable bit */
3293 if (wol) {
3294 /* restart the NIC in WOL mode.
3295 * The nic must be stopped for this.
3296 * FIXME: use the WOL interrupt
3298 enable_wol_mode(dev, 0);
3299 } else {
3300 /* Restore PME enable bit unmolested */
3301 writel(np->SavedClkRun, ioaddr + ClkRun);
3305 netif_device_detach(dev);
3306 rtnl_unlock();
3307 return 0;
3311 static int natsemi_resume (struct pci_dev *pdev)
3313 struct net_device *dev = pci_get_drvdata (pdev);
3314 struct netdev_private *np = netdev_priv(dev);
3316 rtnl_lock();
3317 if (netif_device_present(dev))
3318 goto out;
3319 if (netif_running(dev)) {
3320 BUG_ON(!np->hands_off);
3321 pci_enable_device(pdev);
3322 /* pci_power_on(pdev); */
3324 natsemi_reset(dev);
3325 init_ring(dev);
3326 disable_irq(dev->irq);
3327 spin_lock_irq(&np->lock);
3328 np->hands_off = 0;
3329 init_registers(dev);
3330 netif_device_attach(dev);
3331 spin_unlock_irq(&np->lock);
3332 enable_irq(dev->irq);
3334 mod_timer(&np->timer, jiffies + 1*HZ);
3336 netif_device_attach(dev);
3337 netif_poll_enable(dev);
3338 out:
3339 rtnl_unlock();
3340 return 0;
3343 #endif /* CONFIG_PM */
3345 static struct pci_driver natsemi_driver = {
3346 .name = DRV_NAME,
3347 .id_table = natsemi_pci_tbl,
3348 .probe = natsemi_probe1,
3349 .remove = __devexit_p(natsemi_remove1),
3350 #ifdef CONFIG_PM
3351 .suspend = natsemi_suspend,
3352 .resume = natsemi_resume,
3353 #endif
3356 static int __init natsemi_init_mod (void)
3358 /* when a module, this is printed whether or not devices are found in probe */
3359 #ifdef MODULE
3360 printk(version);
3361 #endif
3363 return pci_module_init (&natsemi_driver);
3366 static void __exit natsemi_exit_mod (void)
3368 pci_unregister_driver (&natsemi_driver);
/* Module entry/exit points. */
module_init(natsemi_init_mod);
module_exit(natsemi_exit_mod);