/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
	Written 1998-2000 by Donald Becker.

	Current maintainer is Ion Badulescu <ionut@cs.columbia.edu>. Please
	send all bug reports to me, and not to Donald Becker, as this code
	has been heavily modified from Donald's original version.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The information below comes from Donald Becker's original driver:

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/starfire.html

	-----------------------------------------------------------
	Linux kernel-specific changes:

	- Use PCI driver interface
	- Merge Becker version 0.15

	LK1.1.3 (Andrew Morton)
	- Merge Becker version 1.03

	LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
	- Support hardware Rx/Tx checksumming
	- Use the GFP firmware taken from Adaptec's Netware driver

	LK1.2.2 (Ion Badulescu)

	LK1.2.3 (Ion Badulescu)
	- Fix the flaky mdio interface
	- More compat clean-ups

	LK1.2.4 (Ion Badulescu)
	- More 2.2.x initialization fixes

	LK1.2.5 (Ion Badulescu)
	- Several fixes from Manfred Spraul

	LK1.2.6 (Ion Badulescu)
	- Fixed ifup/ifdown/ifup problem in 2.4.x

	LK1.2.7 (Ion Badulescu)
	- Made more functions static and __init

	LK1.2.8 (Ion Badulescu)
	- Quell bogus error messages, inform about the Tx threshold
	- Removed #ifdef CONFIG_PCI, this driver is PCI only

	LK1.2.9 (Ion Badulescu)
	- Merged Jeff Garzik's changes from 2.4.4-pre5
	- Added 2.2.x compatibility stuff required by the above changes

	LK1.2.9a (Ion Badulescu)
	- More updates from Jeff Garzik

	LK1.3.0 (Ion Badulescu)
	- Merged zerocopy support

	LK1.3.1 (Ion Badulescu)
	- Added ethtool support
	- Added GPIO (media change) interrupt support

	LK1.3.2 (Ion Badulescu)
	- Fixed 2.2.x compatibility issues introduced in 1.3.1
	- Fixed ethtool ioctl returning uninitialized memory

	LK1.3.3 (Ion Badulescu)
	- Initialize the TxMode register properly
	- Don't dereference dev->priv after freeing it

	LK1.3.4 (Ion Badulescu)
	- Fixed initialization timing problems
	- Fixed interrupt mask definitions

	LK1.3.5 (jgarzik)
	- ethtool NWAY_RST, GLINK, [GS]MSGLVL support

	LK1.3.6:
	- Sparc64 support and fixes (Ion Badulescu)
	- Better stats and error handling (Ion Badulescu)
	- Use new pci_set_mwi() PCI API function (jgarzik)

	LK1.3.7 (Ion Badulescu)
	- minimal implementation of tx_timeout()
	- correctly shutdown the Rx/Tx engines in netdev_close()
	- added calls to netif_carrier_on/off
	(patch from Stefan Rompf <srompf@isg.de>)

	LK1.3.8 (Ion Badulescu)
	- adjust DMA burst size on sparc64
	- reworked zerocopy support for 64-bit buffers
	- working and usable interrupt mitigation/latency
	- reduced Tx interrupt frequency for lower interrupt overhead

	LK1.3.9 (Ion Badulescu)
	- bugfix for mcast filter
	- enable the right kind of Tx interrupts (TxDMADone, not TxDone)

	LK1.4.0 (Ion Badulescu)

	LK1.4.1 (Ion Badulescu)
	- flush PCI posting buffers after disabling Rx interrupts
	- put the chip to a D3 slumber on driver unload
	- added config option to enable/disable NAPI

	TODO:	bugfixes (no bugs known as of right now)
*/
#define DRV_NAME	"starfire"
#define DRV_VERSION	"1.03+LK1.4.1"
#define DRV_RELDATE	"February 10, 2002"

#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/uaccess.h>
#include <asm/io.h>
/*
 * Adaptec's license for their drivers (which is where I got the
 * firmware files) does not allow one to redistribute them. Thus, we can't
 * include the firmware with this driver.
 *
 * However, should a legal-to-distribute firmware become available,
 * the driver developer would need only to obtain the firmware in the
 * form of a C header file.
 * Once that's done, the #undef below must be changed into a #define
 * for this driver to really use the firmware. Note that Rx/Tx
 * hardware TCP checksumming is not possible without the firmware.
 *
 * WANTED: legal firmware to include with this GPL'd driver.
 */
#undef HAS_FIRMWARE

/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE

/*
 * Define this if using the driver with the zero-copy patch
 */
#if defined(HAS_FIRMWARE) && defined(MAX_SKB_FRAGS)
#define ZEROCOPY
#endif

#ifdef HAS_FIRMWARE
#include "starfire_firmware.h"
#endif /* HAS_FIRMWARE */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define VLAN_SUPPORT
#endif

#ifndef CONFIG_ADAPTEC_STARFIRE_NAPI
#undef HAVE_NETDEV_POLL
#endif
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
#ifdef HAS_FIRMWARE
static int enable_hw_cksum = 1;
#else
static int enable_hw_cksum = 0;
#endif
#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer.*/

/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow for unaligned loads even of integers being
 * misaligned on a 2 byte boundary. Thus always force copying of
 * packets as the starfire doesn't allow for misaligned DMAs ;-(
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif
/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif
/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
   The media type is usually passed in 'options[]'.
   These variables are deprecated, use ethtool instead. -Ion
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {0, };
static int full_duplex[MAX_UNITS] = {0, };
/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE	256
#define TX_RING_SIZE	32
/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
#define DONE_Q_SIZE	1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN	256
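/*
 * Illustrative arithmetic (an editorial note, not from the original
 * sources): netdev_open() below rounds each queue size up to this
 * alignment with the usual round-up idiom
 *	((size + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN
 * e.g. 1024 Tx completion entries of 4 bytes each occupy 4096 bytes,
 * already a multiple of 256, so the rounding leaves that size unchanged.
 */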
#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2 * HZ)
/*
 * We need a much better method to determine if dma_addr_t is 64-bit.
 */
#if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
/* 64-bit dma_addr_t */
#define ADDR_64BITS	/* This chip uses 64 bit addresses. */
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif
#ifdef MAX_SKB_FRAGS
#define skb_first_frag_len(skb)	skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
#else  /* not MAX_SKB_FRAGS */
#define skb_first_frag_len(skb)	(skb->len)
#define skb_num_frags(skb) 1
#endif /* not MAX_SKB_FRAGS */
/* 2.2.x compatibility code */
#if LINUX_VERSION_CODE < 0x20300

#include "starfire-kcomp22.h"

#else  /* LINUX_VERSION_CODE > 0x20300 */

#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>

#define COMPAT_MOD_INC_USE_COUNT
#define COMPAT_MOD_DEC_USE_COUNT

#define init_tx_timer(dev, func, timeout) \
	dev->tx_timeout = func; \
	dev->watchdog_timeo = timeout;
#define kick_tx_timer(dev, func, timeout)

#define netif_start_if(dev)
#define netif_stop_if(dev)

#define PCI_SLOT_NAME(pci_dev)	pci_name(pci_dev)

#endif /* LINUX_VERSION_CODE > 0x20300 */
#ifdef HAVE_NETDEV_POLL
#define init_poll(dev) \
	dev->poll = &netdev_poll; \
	dev->weight = max_interrupt_work;
#define netdev_rx(dev, ioaddr) \
do { \
	u32 intr_enable; \
	if (netif_rx_schedule_prep(dev)) { \
		__netif_rx_schedule(dev); \
		intr_enable = readl(ioaddr + IntrEnable); \
		intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
		writel(intr_enable, ioaddr + IntrEnable); \
		readl(ioaddr + IntrEnable); /* flush PCI posting buffers */ \
	} else { \
		/* Paranoia check */ \
		intr_enable = readl(ioaddr + IntrEnable); \
		if (intr_enable & (IntrRxDone | IntrRxEmpty)) { \
			printk("%s: interrupt while in polling mode!\n", dev->name); \
			intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
			writel(intr_enable, ioaddr + IntrEnable); \
		} \
	} \
} while (0)
#define netdev_receive_skb(skb) netif_receive_skb(skb)
#define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid)
static int	netdev_poll(struct net_device *dev, int *budget);
#else  /* not HAVE_NETDEV_POLL */
#define init_poll(dev)
#define netdev_receive_skb(skb) netif_rx(skb)
#define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid)
#define netdev_rx(dev, ioaddr) \
do { \
	int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \
	__netdev_rx(dev, &quota); \
} while (0)
#endif /* not HAVE_NETDEV_POLL */
/* end of compatibility code */
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");

MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(mtu, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(intr_latency, "i");
MODULE_PARM(small_frames, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(enable_hw_cksum, "i");
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
/*
			Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
ring sizes are set fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue. When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
in the interrupt handler.

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure. There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding. It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 0/1/2/3 receive descriptors. The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor. The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.
When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff. When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receives.

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas and IA64. For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally. Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.
IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.
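
An illustrative sketch of that threshold handshake (the real checks live
in start_tx() and intr_handler() below):

	producer side:
		if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
			netif_stop_queue(dev);
	consumer side:
		if (netif_queue_stopped(dev) &&
		    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE))
			netif_wake_queue(dev);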
IV. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

V. Notes

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)

*/
enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
	CH_6915 = 0,
};

static struct pci_device_id starfire_pci_tbl[] = {
	{ 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static struct chip_info {
	const char *name;
	int drv_flags;
} netdrv_tbl[] __devinitdata = {
	{ "Adaptec Starfire 6915", CanHaveMII },
};
/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device. The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
	PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
	IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
	MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
	GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
	TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
	TxRingHiAddr=0x5009C, /* 64 bit address extension. */
	TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
	TxThreshold=0x500B0,
	CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
	RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
	CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
	RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
	RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
	TxMode=0x55000, VlanType=0x55064,
	PerfFilterTable=0x56000, HashTable=0x56100,
	TxGfpMem=0x58000, RxGfpMem=0x5a000,
};
/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
	IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
	IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
	IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
	IntrTxComplQLow=0x200000, IntrPCI=0x100000,
	IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
	IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
	IntrNormalSummary=0x8000, IntrTxDone=0x4000,
	IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
	IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
	IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
	IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
	IntrNoTxCsum=0x20, IntrTxBadID=0x10,
	IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
	IntrTxGfp=0x02, IntrPCIPad=0x01,
	IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
	IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
	IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};
/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
	AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
	AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
	PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
};
/* Bits in the TxMode register */
enum tx_mode_bits {
	MiiSoftReset=0x8000, MIILoopback=0x4000,
	TxFlowEnable=0x0800, RxFlowEnable=0x0400,
	PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};
/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
	TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
	TxDescSpace128=0x30, TxDescSpace256=0x40,
	TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
	TxDescType3=0x03, TxDescType4=0x04,
	TxNoDMACompletion=0x08,
	TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
	TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
	TxDMABurstSizeShift=8,
};
/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
	RxBufferLenShift=16, RxMinDescrThreshShift=0,
	RxPrefetchMode=0x8000, RxVariableQ=0x2000,
	Rx2048QEntries=0x4000, Rx256QEntries=0,
	RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
	RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
	RxDescSpace4=0x000, RxDescSpace8=0x100,
	RxDescSpace16=0x200, RxDescSpace32=0x300,
	RxDescSpace64=0x400, RxDescSpace128=0x500,
};
/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
	RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
	RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
	RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
	RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
	RxChecksumRejectTCPOnly=0x01000000,
	RxCompletionQ2Enable=0x800000,
	RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
	RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
	RxDMAQ2NonIP=0x400000,
	RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
	RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
	RxBurstSizeShift=0,
};
/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
	RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
	RxComplProducerWrEn=0x40,
	RxComplType0=0x00, RxComplType1=0x10,
	RxComplType2=0x20, RxComplType3=0x30,
	RxComplThreshShift=0,
};
/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
	TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
	TxComplProducerWrEn=0x40,
	TxComplIntrStatus=0x20,
	CommonQueueMode=0x10,
	TxComplThreshShift=0,
};
/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
	RxEnable=0x05, TxEnable=0x0a,
	RxGFPEnable=0x10, TxGFPEnable=0x20,
};
/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
	Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
	SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
	IntrLatencyMask=0x1f,
};
/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
	dma_addr_t rxaddr;
};
enum rx_desc_bits {
	RxDescValid=1, RxDescEndRing=2,
};
/* Completion queue entry. */
struct short_rx_done_desc {
	u32 status;			/* Low 16 bits is length. */
};
struct basic_rx_done_desc {
	u32 status;			/* Low 16 bits is length. */
	u16 vlanid;
	u16 status2;
};
struct csum_rx_done_desc {
	u32 status;			/* Low 16 bits is length. */
	u16 csum;			/* Partial checksum */
	u16 status2;
};
struct full_rx_done_desc {
	u32 status;			/* Low 16 bits is length. */
	u16 status3;
	u16 status2;
	u16 vlanid;
	u16 csum;			/* partial checksum */
	u32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef HAS_FIRMWARE
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */
#else  /* not HAS_FIRMWARE */
#ifdef VLAN_SUPPORT
typedef struct basic_rx_done_desc rx_done_desc;
#define RxComplType RxComplType1
#else  /* not VLAN_SUPPORT */
typedef struct short_rx_done_desc rx_done_desc;
#define RxComplType RxComplType0
#endif /* not VLAN_SUPPORT */
#endif /* not HAS_FIRMWARE */
enum rx_done_bits {
	RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};
/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
	u32 status;			/* Upper bits are status, lower 16 length. */
	u32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
	u32 status;			/* Upper bits are status, lower 16 length. */
	u32 reserved;
	u64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim
enum tx_desc_bits {
	TxDescID=0xB0000000,
	TxCRCEn=0x01000000, TxDescIntr=0x08000000,
	TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
	u32 status;			/* timestamp, index. */
#if 0
	u32 intrstatus;			/* interrupt status */
#endif
};
struct rx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
};
struct tx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
	unsigned int used_slots;
};
#define PHY_CNT		2
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct starfire_rx_desc *rx_ring;
	starfire_tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	/* The addresses of rx/tx-in-place skbuffs. */
	struct rx_ring_info rx_info[RX_RING_SIZE];
	struct tx_ring_info tx_info[TX_RING_SIZE];
	/* Pointers to completion queues (full pages). */
	rx_done_desc *rx_done_q;
	dma_addr_t rx_done_q_dma;
	unsigned int rx_done;
	struct tx_done_desc *tx_done_q;
	dma_addr_t tx_done_q_dma;
	unsigned int tx_done;
	struct net_device_stats stats;
	struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
	struct vlan_group *vlgrp;
#endif
	void *queue_mem;
	dma_addr_t queue_mem_dma;
	size_t queue_mem_size;

	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx, reap_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	/* These values keep track of the transceiver/media in use. */
	int speed100;			/* Set if speed == 100MBit. */
	u32 tx_mode;
	u32 intr_timer_ctrl;
	u8 tx_threshold;
	/* MII transceiver section. */
	struct mii_if_info mii_if;	/* MII lib hooks/info */
	int phy_cnt;			/* MII device addresses. */
	unsigned char phys[PHY_CNT];	/* MII device addresses. */
};
static int	mdio_read(struct net_device *dev, int phy_id, int location);
static void	mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int	netdev_open(struct net_device *dev);
static void	check_duplex(struct net_device *dev);
static void	tx_timeout(struct net_device *dev);
static void	init_ring(struct net_device *dev);
static int	start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void	netdev_error(struct net_device *dev, int intr_status);
static int	__netdev_rx(struct net_device *dev, int *quota);
static void	refill_rx_ring(struct net_device *dev);
static void	set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int	netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int	netdev_close(struct net_device *dev);
static void	netdev_media_change(struct net_device *dev);
#ifdef VLAN_SUPPORT
static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct netdev_private *np = dev->priv;

	spin_lock(&np->lock);
	if (debug > 2)
		printk("%s: Setting vlgrp to %p\n", dev->name, grp);
	np->vlgrp = grp;
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}

static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct netdev_private *np = dev->priv;

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}

static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct netdev_private *np = dev->priv;

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
	np->vlgrp->vlan_devices[vid] = NULL;
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}
#endif /* VLAN_SUPPORT */
static int __devinit starfire_init_one(struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, irq, option, chip_idx = ent->driver_data;
	struct net_device *dev;
	static int card_idx = -1;
	long ioaddr;
	int drv_flags, io_size;
	int boguscnt;

#ifndef MODULE
	/* when built into the kernel, we only print version if device is found */
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	card_idx++;

	if (pci_enable_device (pdev))
		return -EIO;

	ioaddr = pci_resource_start(pdev, 0);
	io_size = pci_resource_len(pdev, 0);
	if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
		printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(*np));
	if (!dev) {
		printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
		return -ENOMEM;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	irq = pdev->irq;

	if (pci_request_regions (pdev, DRV_NAME)) {
		printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
		goto err_out_free_netdev;
	}

	/* ioremap is broken in Linux-2.2.x/sparc64 */
#if !defined(CONFIG_SPARC64) || LINUX_VERSION_CODE > 0x20300
	ioaddr = (long) ioremap(ioaddr, io_size);
	if (!ioaddr) {
		printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
		       card_idx, io_size, ioaddr);
		goto err_out_free_res;
	}
#endif /* !CONFIG_SPARC64 || Linux 2.3.0+ */

	pci_set_master(pdev);

	/* enable MWI -- it vastly improves Rx performance on sparc64 */
	pci_set_mwi(pdev);
#ifdef MAX_SKB_FRAGS
	dev->features |= NETIF_F_SG;
#endif /* MAX_SKB_FRAGS */
	/* Starfire can do TCP/UDP checksumming */
#ifdef ZEROCOPY
	dev->features |= NETIF_F_IP_CSUM;
#endif /* ZEROCOPY */
#ifdef VLAN_SUPPORT
	dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
	dev->vlan_rx_register = netdev_vlan_rx_register;
	dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
	dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */
	/* Serial EEPROM reads are hidden by the hardware. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20 - i);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
	if (debug > 4)
		for (i = 0; i < 0x20; i++)
			printk("%2.2x%s",
			       (unsigned int)readb(ioaddr + EEPROMCtrl + i),
			       i % 16 != 15 ? " " : "\n");
#endif
	/* Issue soft reset */
	writel(MiiSoftReset, ioaddr + TxMode);
	udelay(1000);
	writel(0, ioaddr + TxMode);

	/* Reset the chip to erase previous misconfiguration. */
	writel(1, ioaddr + PCIDeviceConfig);
	boguscnt = 1000;
	while (--boguscnt > 0) {
		udelay(100);
		if ((readl(ioaddr + PCIDeviceConfig) & 1) == 0)
			break;
	}
	if (boguscnt == 0)
		printk("%s: chipset reset never completed!\n", dev->name);
	/* wait a little longer */
	udelay(1000);

	dev->base_addr = ioaddr;
	dev->irq = irq;
	np = dev->priv;
	spin_lock_init(&np->lock);
	pci_set_drvdata(pdev, dev);

	np->pci_dev = pdev;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	drv_flags = netdrv_tbl[chip_idx].drv_flags;

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option & 0x200)
		np->mii_if.full_duplex = 1;

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;
	else
		np->mii_if.force_media = 0;
	np->speed100 = 1;
	/* timer resolution is 128 * 0.8us */
	np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
		Timer10X | EnableIntrMasking;
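	/*
	 * Worked example (illustrative only): one timer tick is
	 * 128 * 0.8us = 102.4us, and (intr_latency * 10) / 1024 converts
	 * microseconds to ticks; intr_latency = 500 yields 5000 / 1024 = 4
	 * ticks, i.e. a latency cap of about 410us.
	 */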
	if (small_frames > 0) {
		np->intr_timer_ctrl |= SmallFrameBypass;
		switch (small_frames) {
		case 1 ... 64:
			np->intr_timer_ctrl |= SmallFrame64;
			break;
		case 65 ... 128:
			np->intr_timer_ctrl |= SmallFrame128;
			break;
		case 129 ... 256:
			np->intr_timer_ctrl |= SmallFrame256;
			break;
		default:
			np->intr_timer_ctrl |= SmallFrame512;
			if (small_frames > 512)
				printk("Adjusting small_frames down to 512\n");
			break;
		}
	}
	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	init_tx_timer(dev, tx_timeout, TX_TIMEOUT);
	init_poll(dev);
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;

	if (mtu)
		dev->mtu = mtu;

	if (register_netdev(dev))
		goto err_out_cleardev;
	printk(KERN_INFO "%s: %s at %#lx, ",
	       dev->name, netdrv_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
	if (drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		int mii_status;
		for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
			mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
			mdelay(100);
			boguscnt = 1000;
			while (--boguscnt > 0)
				if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
					break;
			if (boguscnt == 0) {
				printk("%s: PHY reset never completed!\n", dev->name);
				continue;
			}
			mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
				       "%#4.4x advertising %#4.4x.\n",
				       dev->name, phy, mii_status, np->mii_if.advertising);
				/* there can be only one PHY on-board */
				break;
			}
		}
		np->phy_cnt = phy_idx;
		if (np->phy_cnt > 0)
			np->mii_if.phy_id = np->phys[0];
		else
			memset(&np->mii_if, 0, sizeof(np->mii_if));
	}

	printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
	       dev->name, enable_hw_cksum ? "enabled" : "disabled");
	return 0;
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	iounmap((void *)ioaddr);
err_out_free_res:
	pci_release_regions (pdev);
err_out_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}
/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
	int result, boguscnt=1000;
	/* ??? Should we add a busy-wait here? */
	do
		result = readl(mdio_addr);
	while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
	if (boguscnt == 0)
		return 0;
	if ((result & 0xffff) == 0xffff)
		return 0;
	return result & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);

	writel(value, mdio_addr);
	/* The busy-wait will occur before a read. */
}
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i, retval;
	size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

	/* Do we ever need to reset the chip??? */

	COMPAT_MOD_INC_USE_COUNT;

	retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (retval) {
		COMPAT_MOD_DEC_USE_COUNT;
		return retval;
	}

	/* Disable the Rx and Tx, and reset the chip. */
	writel(0, ioaddr + GenCtrl);
	writel(1, ioaddr + PCIDeviceConfig);
	if (debug > 1)
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
		       dev->name, dev->irq);

	/* Allocate the various queues. */
	if (np->queue_mem == 0) {
		tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
		np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
		np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
		if (np->queue_mem == 0) {
			COMPAT_MOD_DEC_USE_COUNT;
			return -ENOMEM;
		}

		np->tx_done_q     = np->queue_mem;
		np->tx_done_q_dma = np->queue_mem_dma;
		np->rx_done_q     = (void *) np->tx_done_q + tx_done_q_size;
		np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
		np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
		np->tx_ring_dma   = np->rx_done_q_dma + rx_done_q_size;
		np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
		np->rx_ring_dma   = np->tx_ring_dma + tx_ring_size;
	}
	/* Start with no carrier, it gets adjusted later */
	netif_carrier_off(dev);
	init_ring(dev);
	/* Set the size of the Rx buffers. */
	writel((np->rx_buf_sz << RxBufferLenShift) |
	       (0 << RxMinDescrThreshShift) |
	       RxPrefetchMode | RxVariableQ |
	       RX_Q_ENTRIES |
	       RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
	       RxDescSpace4,
	       ioaddr + RxDescQCtrl);
	/* Set up the Rx DMA controller. */
	writel(RxChecksumIgnore |
	       (0 << RxEarlyIntThreshShift) |
	       (6 << RxHighPrioThreshShift) |
	       ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
	       ioaddr + RxDMACtrl);

	/* Set Tx descriptor */
	writel((2 << TxHiPriFIFOThreshShift) |
	       (0 << TxPadLenShift) |
	       ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
	       TX_DESC_Q_ADDR_SIZE |
	       TX_DESC_SPACING | TX_DESC_TYPE,
	       ioaddr + TxDescCtrl);
	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
	writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
	writel(np->tx_ring_dma, ioaddr + TxRingPtr);

	writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
	writel(np->rx_done_q_dma |
	       RxComplType |
	       (0 << RxComplThreshShift),
	       ioaddr + RxCompletionAddr);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
	/* Fill both the Tx SA register and the Rx perfect filter. */
	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
	/* The first entry is special because it bypasses the VLAN filter. */
	writew(0, ioaddr + PerfFilterTable);
	writew(0, ioaddr + PerfFilterTable + 4);
	writew(0, ioaddr + PerfFilterTable + 8);
	for (i = 1; i < 16; i++) {
		u16 *eaddrs = (u16 *)dev->dev_addr;
		long setup_frm = ioaddr + PerfFilterTable + i * 16;
		writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
		writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
		writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
	}
	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds. */
	np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;	/* modified when link is up. */
	writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
	udelay(1000);
	writel(np->tx_mode, ioaddr + TxMode);
	np->tx_threshold = 4;
	writel(np->tx_threshold, ioaddr + TxThreshold);

	writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

	netif_start_if(dev);
	netif_start_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
	set_rx_mode(dev);

	np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
	check_duplex(dev);

	/* Enable GPIO interrupts on link change */
	writel(0x0f00ff00, ioaddr + GPIOCtrl);

	/* Set the interrupt mask */
	writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
	       IntrTxDMADone | IntrStatsMax | IntrLinkChange |
	       IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
	       ioaddr + IntrEnable);
	/* Enable PCI interrupts. */
	writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
	       ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
	/* Set VLAN type to 802.1q */
	writel(ETH_P_8021Q, ioaddr + VlanType);
#endif /* VLAN_SUPPORT */
#ifdef HAS_FIRMWARE
	/* Load Rx/Tx firmware into the frame processors */
	for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
		writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
	for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
		writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
#endif /* HAS_FIRMWARE */
	if (enable_hw_cksum)
		/* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
		writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
	else
		/* Enable the Rx and Tx units only. */
		writel(TxEnable|RxEnable, ioaddr + GenCtrl);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Done netdev_open().\n",
		       dev->name);

	return 0;
}
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	u16 reg0;
	int silly_count = 1000;

	mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
	udelay(500);
	while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
		/* do nothing */;
	if (!silly_count) {
		printk("%s: MII reset failed!\n", dev->name);
		return;
	}

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

	if (!np->mii_if.force_media) {
		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
	} else {
		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
		if (np->speed100)
			reg0 |= BMCR_SPEED100;
		if (np->mii_if.full_duplex)
			reg0 |= BMCR_FULLDPLX;
		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");
	}
	mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int old_debug;

	printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
	       "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));

	/* Perhaps we should reinitialize the hardware here. */

	/*
	 * Stop and restart the interface.
	 * Cheat and increase the debug level temporarily.
	 */
	old_debug = debug;
	debug = 2;
	netdev_close(dev);
	netdev_open(dev);
	debug = old_debug;

	/* Trigger an immediate transmit demand. */

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int i;

	np->cur_rx = np->cur_tx = np->reap_tx = 0;
	np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_info[i].skb = skb;
		if (skb == NULL)
			break;
		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		skb->dev = dev;			/* Mark as being used by this device. */
		/* Grrr, we cannot offset to correctly align the IP header. */
		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
	}
	writew(i - 1, dev->base_addr + RxDescQIdx);
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Clear the remainder of the Rx buffer ring. */
	for (  ; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = 0;
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

	/* Clear the completion rings. */
	for (i = 0; i < DONE_Q_SIZE; i++) {
		np->rx_done_q[i].status = 0;
		np->tx_done_q[i].status = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
}
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	unsigned int entry;
	u32 status;
	int i;

	kick_tx_timer(dev, tx_timeout, TX_TIMEOUT);

	/*
	 * be cautious here, wrapping the queue has weird semantics
	 * and we may not have enough slots even when it seems we do.
	 */
	if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
		netif_stop_queue(dev);
		return 1;
	}

#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
	{
		int has_bad_length = 0;

		if (skb_first_frag_len(skb) == 1)
			has_bad_length = 1;
		else {
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				if (skb_shinfo(skb)->frags[i].size == 1) {
					has_bad_length = 1;
					break;
				}
		}

		if (has_bad_length)
			skb_checksum_help(skb);
	}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */

	entry = np->cur_tx % TX_RING_SIZE;
	for (i = 0; i < skb_num_frags(skb); i++) {
		int wrap_ring = 0;
		status = TxDescID;

		if (i == 0) {
			np->tx_info[entry].skb = skb;
			status |= TxCRCEn;
			if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
				status |= TxRingWrap;
				wrap_ring = 1;
			}
			if (np->reap_tx) {
				status |= TxDescIntr;
				np->reap_tx = 0;
			}
			if (skb->ip_summed == CHECKSUM_HW) {
				status |= TxCalTCP;
				np->stats.tx_compressed++;
			}
			status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);

			np->tx_info[entry].mapping =
				pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		} else {
#ifdef MAX_SKB_FRAGS
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
			status |= this_frag->size;
			np->tx_info[entry].mapping =
				pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
#endif /* MAX_SKB_FRAGS */
		}

		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
		np->tx_ring[entry].status = cpu_to_le32(status);
		if (debug > 3)
			printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
			       dev->name, np->cur_tx, np->dirty_tx,
			       entry, status);
		if (wrap_ring) {
			np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry = 0;
		} else {
			np->tx_info[entry].used_slots = 1;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry++;
		}
		/* scavenge the tx descriptors twice per TX_RING_SIZE */
		if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
			np->reap_tx = 1;
	}

	/* Non-x86: explicitly flush descriptor cache lines here. */
	/* Ensure all descriptors are written back before the transmit is
	   initiated. - Jes */
	wmb();

	/* Update the producer index. */
	writel(entry * (sizeof(starfire_tx_desc) / 8), dev->base_addr + TxProducerIdx);

	/* 4 is arbitrary, but should be ok */
	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np;
	long ioaddr;
	int boguscnt = max_interrupt_work;
	int consumer;
	int tx_status;
	int handled = 0;

	ioaddr = dev->base_addr;
	np = dev->priv;

	do {
		u32 intr_status = readl(ioaddr + IntrClear);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
			       dev->name, intr_status);

		if (intr_status == 0 || intr_status == (u32) -1)
			break;

		handled = 1;

		if (intr_status & (IntrRxDone | IntrRxEmpty))
			netdev_rx(dev, ioaddr);

		/* Scavenge the skbuff list based on the Tx-done queue.
		   There are redundant checks here that may be cleaned up
		   after the driver has proven to be reliable. */
		consumer = readl(ioaddr + TxConsumerIdx);
		if (debug > 3)
			printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
			       dev->name, consumer);

		while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
			if (debug > 3)
				printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
				       dev->name, np->dirty_tx, np->tx_done, tx_status);
			if ((tx_status & 0xe0000000) == 0xa0000000) {
				np->stats.tx_packets++;
			} else if ((tx_status & 0xe0000000) == 0x80000000) {
				u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
				struct sk_buff *skb = np->tx_info[entry].skb;
				np->tx_info[entry].skb = NULL;
				pci_unmap_single(np->pci_dev,
						 np->tx_info[entry].mapping,
						 skb_first_frag_len(skb),
						 PCI_DMA_TODEVICE);
				np->tx_info[entry].mapping = 0;
				np->dirty_tx += np->tx_info[entry].used_slots;
				entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
#ifdef MAX_SKB_FRAGS
				{
					int i;
					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
						pci_unmap_single(np->pci_dev,
								 np->tx_info[entry].mapping,
								 skb_shinfo(skb)->frags[i].size,
								 PCI_DMA_TODEVICE);
						np->dirty_tx++;
						entry++;
					}
				}
#endif /* MAX_SKB_FRAGS */
				dev_kfree_skb_irq(skb);
			}
			np->tx_done_q[np->tx_done].status = 0;
			np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
		}
		writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);

		if (netif_queue_stopped(dev) &&
		    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
			/* The ring is no longer full, wake the queue. */
			netif_wake_queue(dev);
		}

		/* Stats overflow */
		if (intr_status & IntrStatsMax)
			get_stats(dev);

		/* Media change interrupt. */
		if (intr_status & IntrLinkChange)
			netdev_media_change(dev);

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & IntrAbnormalSummary)
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			if (debug > 1)
				printk(KERN_WARNING "%s: Too much work at interrupt, "
				       "status=%#8.8x.\n",
				       dev->name, intr_status);
			break;
		}
	} while (1);

	if (debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));

	return IRQ_RETVAL(handled);
}
/* This routine is logically part of the interrupt/poll handler, but separated
   for clarity, code sharing between NAPI/non-NAPI, and better register allocation. */
static int __netdev_rx(struct net_device *dev, int *quota)
{
	struct netdev_private *np = dev->priv;
	u32 desc_status;
	int retcode = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
		struct sk_buff *skb;
		u16 pkt_len;
		int entry;
		rx_done_desc *desc = &np->rx_done_q[np->rx_done];

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
		if (!(desc_status & RxOK)) {
			/* There was an error. */
			if (debug > 2)
				printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
			np->stats.rx_errors++;
			if (desc_status & RxFIFOErr)
				np->stats.rx_fifo_errors++;
			goto next_rx;
		}

		if (*quota <= 0) {	/* out of rx quota */
			retcode = 1;
			goto out;
		}
		(*quota)--;

		pkt_len = desc_status;	/* Implicitly Truncate */
		entry = (desc_status >> 16) & 0x7ff;

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
		/* Check if the packet is long enough to accept without copying
		   to a minimally-sized skbuff. */
		if (pkt_len < rx_copybreak
		    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			skb->dev = dev;
			skb_reserve(skb, 2);	/* 16 byte align the IP header */
			pci_dma_sync_single_for_cpu(np->pci_dev,
						    np->rx_info[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
			eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
			pci_dma_sync_single_for_device(np->pci_dev,
						       np->rx_info[entry].mapping,
						       pkt_len, PCI_DMA_FROMDEVICE);
			skb_put(skb, pkt_len);
		} else {
			pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			skb = np->rx_info[entry].skb;
			skb_put(skb, pkt_len);
			np->rx_info[entry].skb = NULL;
			np->rx_info[entry].mapping = 0;
		}
#ifndef final_version			/* Remove after testing. */
		/* You will want this info for the initial debug. */
		if (debug > 5)
			printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
			       "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x.\n",
			       skb->data[0], skb->data[1], skb->data[2], skb->data[3],
			       skb->data[4], skb->data[5], skb->data[6], skb->data[7],
			       skb->data[8], skb->data[9], skb->data[10],
			       skb->data[11], skb->data[12], skb->data[13]);
#endif

		skb->protocol = eth_type_trans(skb, dev);
#if defined(HAS_FIRMWARE) || defined(VLAN_SUPPORT)
		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
#endif
#ifdef HAS_FIRMWARE
		if (le16_to_cpu(desc->status2) & 0x0100) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			np->stats.rx_compressed++;
		}
		/*
		 * This feature doesn't seem to be working, at least
		 * with the two firmware versions I have. If the GFP sees
		 * an IP fragment, it either ignores it completely, or reports
		 * "bad checksum" on it.
		 *
		 * Maybe I missed something -- corrections are welcome.
		 * Until then, the printk stays. :-) -Ion
		 */
		else if (le16_to_cpu(desc->status2) & 0x0040) {
			skb->ip_summed = CHECKSUM_HW;
			skb->csum = le16_to_cpu(desc->csum);
			printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
		}
#endif /* HAS_FIRMWARE */
#ifdef VLAN_SUPPORT
		if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n", le16_to_cpu(desc->vlanid));
			/* vlan_netdev_receive_skb() expects a packet with the VLAN tag stripped out */
			vlan_netdev_receive_skb(skb, np->vlgrp, le16_to_cpu(desc->vlanid) & VLAN_VID_MASK);
		} else
#endif /* VLAN_SUPPORT */
			netdev_receive_skb(skb);
		dev->last_rx = jiffies;
		np->stats.rx_packets++;

	next_rx:
		np->cur_rx++;
		desc->status = 0;
		np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
	}
	writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx);

 out:
	refill_rx_ring(dev);
	if (debug > 5)
		printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
		       retcode, np->rx_done, desc_status);
	return retcode;
}
#ifdef HAVE_NETDEV_POLL
static int netdev_poll(struct net_device *dev, int *budget)
{
	u32 intr_status;
	long ioaddr = dev->base_addr;
	int retcode = 0, quota = dev->quota;

	do {
		writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);

		retcode = __netdev_rx(dev, &quota);
		*budget -= (dev->quota - quota);
		dev->quota = quota;
		if (retcode)
			goto out;

		intr_status = readl(ioaddr + IntrStatus);
	} while (intr_status & (IntrRxDone | IntrRxEmpty));

	netif_rx_complete(dev);
	intr_status = readl(ioaddr + IntrEnable);
	intr_status |= IntrRxDone | IntrRxEmpty;
	writel(intr_status, ioaddr + IntrEnable);

 out:
	if (debug > 1)
		printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n", retcode);

	/* Restart Rx engine if stopped. */
	return retcode;
}
#endif /* HAVE_NETDEV_POLL */
static void refill_rx_ring(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	struct sk_buff *skb;
	int entry = -1;

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_info[entry].skb == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_info[entry].skb = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			np->rx_info[entry].mapping =
				pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			skb->dev = dev;	/* Mark as being used by this device. */
			np->rx_ring[entry].rxaddr =
				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
		}
		if (entry == RX_RING_SIZE - 1)
			np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
	}
	if (entry >= 0)
		writew(entry, dev->base_addr + RxDescQIdx);
}
static void netdev_media_change(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	u16 reg0, reg1, reg4, reg5;
	u32 new_tx_mode;
	u32 new_intr_timer_ctrl;

	/* reset status first */
	mdio_read(dev, np->phys[0], MII_BMCR);
	mdio_read(dev, np->phys[0], MII_BMSR);

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
	reg1 = mdio_read(dev, np->phys[0], MII_BMSR);

	if (reg1 & BMSR_LSTATUS) {
		/* link is up */
		if (reg0 & BMCR_ANENABLE) {
			/* autonegotiation is enabled */
			reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
			reg5 = mdio_read(dev, np->phys[0], MII_LPA);
			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 1;
			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 0;
			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
				np->speed100 = 0;
				np->mii_if.full_duplex = 1;
			} else {
				np->speed100 = 0;
				np->mii_if.full_duplex = 0;
			}
		} else {
			/* autonegotiation is disabled */
			if (reg0 & BMCR_SPEED100)
				np->speed100 = 1;
			else
				np->speed100 = 0;
			if (reg0 & BMCR_FULLDPLX)
				np->mii_if.full_duplex = 1;
			else
				np->mii_if.full_duplex = 0;
		}
		netif_carrier_on(dev);
		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");

		new_tx_mode = np->tx_mode & ~FullDuplex;	/* duplex setting */
		if (np->mii_if.full_duplex)
			new_tx_mode |= FullDuplex;
		if (np->tx_mode != new_tx_mode) {
			np->tx_mode = new_tx_mode;
			writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
			udelay(1000);
			writel(np->tx_mode, ioaddr + TxMode);
		}

		new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
		if (np->speed100)
			new_intr_timer_ctrl |= Timer10X;
		if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
			np->intr_timer_ctrl = new_intr_timer_ctrl;
			writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
		}
	} else {
		netif_carrier_off(dev);
		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
	}
}
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = dev->priv;

	/* Came close to underrunning the Tx FIFO, increase threshold. */
	if (intr_status & IntrTxDataLow) {
		if (np->tx_threshold <= PKT_BUF_SZ / 16) {
			writel(++np->tx_threshold, dev->base_addr + TxThreshold);
			printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
			       dev->name, np->tx_threshold * 16);
		} else
			printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n",
			       dev->name);
	}
	if (intr_status & IntrRxGFPDead) {
		np->stats.rx_fifo_errors++;
		np->stats.rx_errors++;
	}
	if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
		np->stats.tx_fifo_errors++;
		np->stats.tx_errors++;
	}
	if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange |
			     IntrStatsMax | IntrTxDataLow | IntrRxGFPDead |
			     IntrNoTxCsum | IntrPCIPad)) && debug)
		printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
		       dev->name, intr_status);
}
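

/* Editor's note: tx_threshold is kept in units of 16 bytes, which is why
 * the message above multiplies by 16 and why the upper bound is
 * PKT_BUF_SZ / 16 (i.e. one full packet buffer).  Illustration only:
 */
#if 0
static inline unsigned int tx_threshold_bytes(const struct netdev_private *np)
{
	return np->tx_threshold * 16;	/* register units -> bytes */
}
#endif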
static struct net_device_stats *get_stats(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;

	/* This adapter architecture needs no SMP locks. */
	np->stats.tx_bytes = readl(ioaddr + 0x57010);
	np->stats.rx_bytes = readl(ioaddr + 0x57044);
	np->stats.tx_packets = readl(ioaddr + 0x57000);
	np->stats.tx_aborted_errors =
		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
	np->stats.tx_window_errors = readl(ioaddr + 0x57018);
	np->stats.collisions =
		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

	/* The chip only reports the count of frames it silently dropped. */
	np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
	writew(0, ioaddr + RxDMAStatus);
	np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
	np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
	np->stats.rx_length_errors = readl(ioaddr + 0x57058);
	np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

	return &np->stats;
}
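

/* Editor's note: most counters above are maintained by the chip and can
 * simply be copied out, but the dropped-frame count in RxDMAStatus is
 * accumulated (+=) and then cleared with the writew(), since reading
 * that register does not reset it.  Sketch of the same read-then-clear
 * pattern (hypothetical helper):
 */
#if 0
static inline u16 read_and_clear16(long ioaddr, int reg)
{
	u16 val = readw(ioaddr + reg);
	writew(0, ioaddr + reg);	/* explicit clear after read */
	return val;
}
#endif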
/* Chips may use the upper or lower CRC bits, and may reverse and/or invert
   them.  Select the endian-ness that results in minimal calculations. */
static void set_rx_mode(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	u32 rx_mode = MinVLANPrio;
	struct dev_mc_list *mclist;
	int i;
#ifdef VLAN_SUPPORT
	struct netdev_private *np = dev->priv;

	rx_mode |= VlanMode;
	if (np->vlgrp) {
		int vlan_count = 0;
		long filter_addr = ioaddr + HashTable + 8;
		for (i = 0; i < VLAN_VID_MASK; i++) {
			if (np->vlgrp->vlan_devices[i]) {
				if (vlan_count >= 32)
					break;
				writew(cpu_to_be16(i), filter_addr);
				filter_addr += 16;
				vlan_count++;
			}
		}
		if (i == VLAN_VID_MASK) {
			rx_mode |= PerfectFilterVlan;
			while (vlan_count < 32) {
				writew(0, filter_addr);
				filter_addr += 16;
				vlan_count++;
			}
		}
	}
#endif /* VLAN_SUPPORT */

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		rx_mode |= AcceptAll;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
	} else if (dev->mc_count <= 14) {
		/* Use the 16 element perfect filter, skip first two entries. */
		long filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		u16 *eaddrs;
		for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
		     i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
		}
		/* Fill the remaining entries with the station address. */
		eaddrs = (u16 *)dev->dev_addr;
		while (i++ < 16) {
			writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		rx_mode |= AcceptBroadcast|PerfectFilter;
	} else {
		/* Must use a multicast hash table. */
		long filter_addr;
		u16 *eaddrs;
		u16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));	/* Multicast hash filter */

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
			__u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];

			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
		}
		/* Clear the perfect filter list, skip first two entries. */
		filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		eaddrs = (u16 *)dev->dev_addr;
		for (i = 2; i < 16; i++) {
			writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
			writew(mc_filter[i], filter_addr);
		rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
	}
	writel(rx_mode, ioaddr + RxFilterMode);
}
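

/* Editor's note: the hash branch above uses the top 9 bits of the
 * little-endian CRC-32 of the address (ether_crc_le() >> 23) as a bit
 * index into the 512-bit table (32 x u16).  (bit_nr >> 4) & ~1 selects
 * the aligned u16 pair holding that bit, and 1 << (bit_nr & 31) sets the
 * bit within the resulting 32-bit word.  Worked example: bit_nr == 100
 * lands in the pair mc_filter[6..7], bit 4 of that 32-bit word.  Sketch:
 */
#if 0
static inline void hash_set_bit(u16 *mc_filter, int bit_nr)
{
	__u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];
	*fptr |= cpu_to_le32(1 << (bit_nr & 31));
}
#endif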
static int netdev_ethtool_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_cmd ecmd;
	struct netdev_private *np = dev->priv;

	if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
		return -EFAULT;

	switch (ecmd.cmd) {
	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info;
		memset(&info, 0, sizeof(info));
		info.cmd = ecmd.cmd;
		strcpy(info.driver, DRV_NAME);
		strcpy(info.version, DRV_VERSION);
		*info.fw_version = 0;
		strcpy(info.bus_info, PCI_SLOT_NAME(np->pci_dev));
		if (copy_to_user(useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	/* get settings */
	case ETHTOOL_GSET: {
		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
		spin_lock_irq(&np->lock);
		mii_ethtool_gset(&np->mii_if, &ecmd);
		spin_unlock_irq(&np->lock);
		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;
	}
	/* set settings */
	case ETHTOOL_SSET: {
		int r;
		struct ethtool_cmd ecmd;
		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
			return -EFAULT;
		spin_lock_irq(&np->lock);
		r = mii_ethtool_sset(&np->mii_if, &ecmd);
		spin_unlock_irq(&np->lock);
		return r;
	}
	/* restart autonegotiation */
	case ETHTOOL_NWAY_RST: {
		return mii_nway_restart(&np->mii_if);
	}
	/* get link status */
	case ETHTOOL_GLINK: {
		struct ethtool_value edata = {ETHTOOL_GLINK};
		edata.data = mii_link_ok(&np->mii_if);
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}

	/* get message-level */
	case ETHTOOL_GMSGLVL: {
		struct ethtool_value edata = {ETHTOOL_GMSGLVL};
		edata.data = debug;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	/* set message-level */
	case ETHTOOL_SMSGLVL: {
		struct ethtool_value edata;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		debug = edata.data;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}
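

/* Editor's note: SIOCETHTOOL hands the driver a user pointer whose first
 * u32 selects the sub-command, which is why the code above copies the
 * ethtool_cmd in once, switches on ecmd.cmd, and then copies in/out the
 * structure each sub-command actually uses.  Hypothetical user-space
 * caller, for illustration only (assumes an AF_INET socket in fd):
 */
#if 0
	struct ethtool_value ev = { ETHTOOL_GLINK };
	struct ifreq ifr;
	strcpy(ifr.ifr_name, "eth0");
	ifr.ifr_data = (char *)&ev;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("link is %s\n", ev.data ? "up" : "down");
#endif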
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = dev->priv;
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCETHTOOL)
		rc = netdev_ethtool_ioctl(dev, rq->ifr_data);
	else {
		struct mii_ioctl_data *data = if_mii(rq);
		spin_lock_irq(&np->lock);
		rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
		spin_unlock_irq(&np->lock);

		if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
			check_duplex(dev);
	}

	return rc;
}
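

/* Editor's note: the non-ethtool path above relies on the kernel's
 * generic MII library, which implements SIOCGMIIPHY, SIOCGMIIREG and
 * SIOCSMIIREG in terms of the mdio_read/mdio_write callbacks registered
 * in np->mii_if.  Only a register write that targets the bound PHY
 * forces the duplex re-check.
 */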
static int netdev_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	int i;

	netif_stop_queue(dev);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx,
		       np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writel(0, ioaddr + GenCtrl);
	readl(ioaddr + GenCtrl);

	if (debug > 5) {
		printk(KERN_DEBUG "  Tx ring at %#llx:\n",
		       (long long) np->tx_ring_dma);
		for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
			printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
			       i, le32_to_cpu(np->tx_ring[i].status),
			       (long long) dma_to_cpu(np->tx_ring[i].addr),
			       le32_to_cpu(np->tx_done_q[i].status));
		printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
		       (long long) np->rx_ring_dma, np->rx_done_q);
		if (np->rx_done_q)
			for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
				printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
				       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr),
				       le32_to_cpu(np->rx_done_q[i].status));
			}
	}

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
		if (np->rx_info[i].skb != NULL) {
			pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_info[i].skb);
		}
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_info[i].skb;
		if (skb == NULL)
			continue;
		pci_unmap_single(np->pci_dev,
				 np->tx_info[i].mapping,
				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		np->tx_info[i].mapping = 0;
		dev_kfree_skb(skb);
		np->tx_info[i].skb = NULL;
	}

	COMPAT_MOD_DEC_USE_COUNT;

	return 0;
}
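

/* Editor's note: the readl() back from GenCtrl above is a PCI posting
 * flush -- it forces the preceding writel() out of the host bridge's
 * write buffers before the driver proceeds to free its resources.  The
 * same idiom, in isolation:
 */
#if 0
	writel(0, ioaddr + GenCtrl);	/* stop the Rx/Tx engines */
	readl(ioaddr + GenCtrl);	/* flush the posted write before teardown */
#endif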
static void __devexit starfire_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np;

	if (!dev)
		BUG();

	np = dev->priv;
	if (np->queue_mem)
		pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);

	unregister_netdev(dev);

	/* XXX: add wakeup code -- requires firmware for MagicPacket */
	pci_set_power_state(pdev, 3);	/* go to sleep in D3 mode */
	pci_disable_device(pdev);

	iounmap((char *)dev->base_addr);
	pci_release_regions(pdev);

	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);			/* Will also free np!! */
}
static struct pci_driver starfire_driver = {
	.name		= DRV_NAME,
	.probe		= starfire_init_one,
	.remove		= __devexit_p(starfire_remove_one),
	.id_table	= starfire_pci_tbl,
};
static int __init starfire_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
#ifndef ADDR_64BITS
	/* we can do this test only at run-time... sigh */
	if (sizeof(dma_addr_t) == sizeof(u64)) {
		printk("This driver has not been ported to this 64-bit architecture yet\n");
		return -ENODEV;
	}
#endif /* not ADDR_64BITS */
#ifndef HAS_FIRMWARE
	/* unconditionally disable hw cksums if firmware is not present */
	enable_hw_cksum = 0;
#endif /* not HAS_FIRMWARE */
	return pci_module_init (&starfire_driver);
}


static void __exit starfire_cleanup (void)
{
	pci_unregister_driver (&starfire_driver);
}


module_init(starfire_init);
module_exit(starfire_cleanup);
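
/* Editor's note: pci_module_init() is the 2.4-era registration wrapper;
 * on later kernels the equivalent call would be (sketch, assuming the
 * plain pci_register_driver() API):
 *
 *	return pci_register_driver(&starfire_driver);
 */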