1 /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
2 /*
3 Written 1997-1998 by Donald Becker.
5 This software may be used and distributed according to the terms
6 of the GNU Public License, incorporated herein by reference.
8 This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
9 It also supports the Symbios Logic version of the same chip core.
11 The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
12 Center of Excellence in Space Data and Information Sciences
13 Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
15 Support and updates available at
16 http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html
19 static const char *version =
20 "yellowfin.c:v1.02 7/26/98 Written by Donald Becker, becker@cesdis.edu\n"
21 " http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html\n";
23 /* A few user-configurable values. */
25 static int max_interrupt_work = 20;
26 static int min_pci_latency = 64;
27 static int mtu = 0;
28 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
29 /* System-wide count of bogus-rx frames. */
30 static int bogus_rx = 0;
31 static int dma_ctrl = 0x004A0263; /* Constrained by errata */
32 static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
33 #elif defined(YF_NEW) /* A future perfect board :->. */
34 static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
35 static int fifo_cfg = 0x0028;
36 #else
37 static int dma_ctrl = 0x004A0263; /* Constrained by errata */
38 static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
39 #endif
41 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
42 Setting to > 1514 effectively disables this feature. */
43 static int rx_copybreak = 0;
45 /* Used to pass the media type, etc.
46 No media types are currently defined. These exist for driver
47 interoperability.
49 #define MAX_UNITS 8 /* More are supported, limit only on options */
50 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
51 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
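/* Illustrative example only (not from the original source): loading this
 * driver as a module with the first unit forced to full duplex could look
 * like
 *	insmod yellowfin.o full_duplex=1 options=0x200
 * The low four bits of each 'options' word select the media port and bit
 * 0x200 requests full duplex; see the option handling in yellowfin_probe1().
 */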
53 /* Operational parameters that are set at compile time. */
55 /* Keep the ring sizes a power of two for efficiency.
56 Making the Tx ring too large decreases the effectiveness of channel
57 bonding and packet priority.
58 There are no ill effects from too-large receive rings. */
59 #define TX_RING_SIZE 16
60 #define RX_RING_SIZE 32
62 /* Operational parameters that usually are not changed. */
63 /* Time in jiffies before concluding the transmitter is hung. */
64 #define TX_TIMEOUT ((2000*HZ)/1000)
66 #include <linux/module.h>
67 #include <linux/kernel.h>
68 #include <linux/sched.h>
69 #include <linux/string.h>
70 #include <linux/timer.h>
71 #include <linux/ptrace.h>
72 #include <linux/errno.h>
73 #include <linux/ioport.h>
74 #include <linux/malloc.h>
75 #include <linux/interrupt.h>
76 #include <linux/pci.h>
77 #include <asm/processor.h> /* Processor type for cache alignment. */
78 #include <asm/bitops.h>
79 #include <asm/unaligned.h>
80 #include <asm/io.h>
82 #include <linux/netdevice.h>
83 #include <linux/etherdevice.h>
84 #include <linux/skbuff.h>
86 /* Kernel compatibility defines, most common to the PCCard package. */
87 #include <linux/version.h> /* Evil and unnecessary */
89 #define RUN_AT(x) (jiffies + (x))
91 #if (LINUX_VERSION_CODE < 0x20123)
92 #define test_and_set_bit(val, addr) set_bit(val, addr)
93 #endif
94 #if LINUX_VERSION_CODE <= 0x20139
95 #define net_device_stats enet_statistics
96 #define NETSTATS_VER2
97 #endif
98 #if LINUX_VERSION_CODE < 0x20155
99 #define PCI_SUPPORT_VER1
100 #define pci_present pcibios_present
101 #endif
102 #if LINUX_VERSION_CODE < 0x20159
103 #define DEV_FREE_SKB(skb) dev_kfree_skb(skb, FREE_WRITE);
104 #else
105 #define DEV_FREE_SKB(skb) dev_kfree_skb(skb);
106 #endif
108 /* The PCI I/O space extent. */
109 #define YELLOWFIN_TOTAL_SIZE 0x100
111 int yellowfin_debug = 1;
114 Theory of Operation
116 I. Board Compatibility
118 This device driver is designed for the Packet Engines "Yellowfin" Gigabit
119 Ethernet adapter. The only PCA currently supported is the G-NIC 64-bit
120 PCI card.
122 II. Board-specific settings
124 PCI bus devices are configured by the system at boot time, so no jumpers
125 need to be set on the board. The system BIOS preferably should assign the
126 PCI INTA signal to an otherwise unused system IRQ line.
127 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
128 interrupt lines.
130 III. Driver operation
132 IIIa. Ring buffers
134 The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
135 This is a descriptor list scheme similar to that used by the EEPro100 and
136 Tulip. This driver uses two statically allocated fixed-size descriptor lists
137 formed into rings by a branch from the final descriptor to the beginning of
138 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
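For illustration (a sketch of what yellowfin_init_ring() below already does,
not additional driver code), the wrap that turns the Rx descriptor list into
a ring is a branch from the final entry back to the head:

	yp->rx_ring[RX_RING_SIZE-1].cmd = CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS;
	yp->rx_ring[RX_RING_SIZE-1].branch_addr = virt_to_bus(&yp->rx_ring[0]);

Each earlier entry branches to the entry that follows it, so the chip walks
the list linearly until the BRANCH_ALWAYS command returns it to entry 0.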
140 The driver allocates full frame size skbuffs for the Rx ring buffers at
141 open() time and passes the skb->data field to the Yellowfin as receive data
142 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
143 a fresh skbuff is allocated and the frame is copied to the new skbuff.
144 When the incoming frame is larger, the skbuff is passed directly up the
145 protocol stack and replaced by a newly allocated skbuff.
147 The RX_COPYBREAK value is chosen to trade off the memory wasted by
148 using a full-sized skbuff for small frames vs. the copying costs of larger
149 frames. For small frames the copying cost is negligible (esp. considering
150 that we are pre-loading the cache with immediately useful header
151 information). For large frames the copying cost is non-trivial, and the
152 larger copy might flush the cache of useful data.
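As a condensed sketch (the real logic is in yellowfin_rx() below; rx_buf here
stands for the address of the ring buffer that received the frame), the
receive path chooses between the two cases roughly as follows:

	if (pkt_len > rx_copybreak) {
		skb = yp->rx_skbuff[entry];
		yp->rx_skbuff[entry] = NULL;
	} else {
		skb = dev_alloc_skb(pkt_len + 2);
		skb_reserve(skb, 2);
		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
	}

In the first branch the original skbuff goes up the stack and the slot is
refilled later; in the second a small skbuff is allocated and the frame is
copied into it.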
154 IIIb. Synchronization
156 The driver runs as two independent, single-threaded flows of control. One
157 is the send-packet routine, which enforces single-threaded use by the
158 dev->tbusy flag. The other thread is the interrupt handler, which is single
159 threaded by the hardware and other software.
161 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
162 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
163 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
164 the 'yp->tx_full' flag.
166 The interrupt handler has exclusive control over the Rx ring and records stats
167 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
168 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
169 clears both the tx_full and tbusy flags.
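In outline (a sketch only, not literal driver code), the transmit side of this
handshake is:

	if (test_and_set_bit(0, &dev->tbusy))
		return 1;
	...queue the skb into tx_ring[cur_tx]...
	if (yp->cur_tx - yp->dirty_tx < TX_RING_SIZE - 1)
		clear_bit(0, &dev->tbusy);
	else
		yp->tx_full = 1;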
171 IV. Notes
173 Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
174 Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
175 and an AlphaStation to verify the Alpha port!
177 IVb. References
179 Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
180 Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
181 Data Manual v3.0
182 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
183 http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
185 IVc. Errata
187 See Packet Engines confidential appendix (prototype chips only).
191 /* A few values that may be tweaked. */
192 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
194 #ifndef PCI_VENDOR_ID_PKT_ENG /* To be defined in linux/pci.h */
195 #define PCI_VENDOR_ID_PKT_ENG 0x1000 /* Hmm, likely number.. */
196 #define PCI_DEVICE_ID_SYM58C885 0x0701
197 #define PCI_DEVICE_ID_YELLOWFIN 0x0702
198 #endif
200 /* The rest of these values should never change. */
202 static void yellowfin_timer(unsigned long data);
204 enum capability_flags {HasMII=1, FullTxStatus=2};
205 static struct chip_info {
206 u16 vendor_id, device_id, device_id_mask, pci_flags;
207 const char *name;
208 void (*media_timer)(unsigned long data);
209 u32 chip_rev; /* As read from ChipRev, not PCI dev ID. */
210 int flags;
211 } chip_tbl[] = {
212 {0x1000, 0x0702, 0xffff, 0, "Yellowfin G-NIC Gbit Ethernet",
213 yellowfin_timer, 0x0702, FullTxStatus},
214 {0x1000, 0x0701, 0xffff, 0, "Symbios SYM53C885",
215 yellowfin_timer, 0x0701, HasMII},
216 {0,},
219 /* Offsets to the Yellowfin registers. Various sizes and alignments. */
220 enum yellowfin_offsets {
221 TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
222 TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
223 RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
224 RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
225 EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
226 ChipRev=0x8C, DMACtrl=0x90, Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
227 MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
228 MII_Status=0xAE,
229 RxDepth=0xB8, FlowCtrl=0xBC,
230 AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
231 EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
232 EEFeature=0xF5,
235 /* The Yellowfin Rx and Tx buffer descriptors. */
236 struct yellowfin_desc {
237 u16 request_cnt;
238 u16 cmd;
239 u32 addr;
240 u32 branch_addr;
241 u16 result_cnt;
242 u16 status;
245 struct tx_status_words {
246 u16 tx_cnt;
247 u16 tx_errs;
248 u16 total_tx_cnt;
249 u16 paused;
252 /* Bits in yellowfin_desc.cmd */
253 enum desc_cmd_bits {
254 CMD_TX_PKT=0x1000, CMD_RX_BUF=0x2000, CMD_TXSTATUS=0x3000,
255 CMD_NOP=0x6000, CMD_STOP=0x7000,
256 BRANCH_ALWAYS=0x0C, INTR_ALWAYS=0x30, WAIT_ALWAYS=0x03,
257 BRANCH_IFTRUE=0x04,
260 /* Bits in yellowfin_desc.status */
261 enum desc_status_bits { RX_EOP=0x0040, };
263 /* Bits in the interrupt status/mask registers. */
264 enum intr_status_bits {
265 IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
266 IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
267 IntrEarlyRx=0x100, IntrWakeup=0x200, };
269 struct yellowfin_private {
270 /* Descriptor rings first for alignment. Tx requires a second descriptor
271 for status. */
272 struct yellowfin_desc rx_ring[RX_RING_SIZE];
273 struct yellowfin_desc tx_ring[TX_RING_SIZE*2];
274 const char *product_name;
275 struct net_device *next_module;
276 /* The addresses of receive-in-place skbuffs. */
277 struct sk_buff* rx_skbuff[RX_RING_SIZE];
279 /* The saved address of a sent-in-place packet/buffer, for later dev_kfree_skb(). */
279 struct sk_buff* tx_skbuff[TX_RING_SIZE];
280 struct tx_status_words tx_status[TX_RING_SIZE];
281 struct timer_list timer; /* Media selection timer. */
282 struct enet_statistics stats;
283 /* Frequently used and paired value: keep adjacent for cache effect. */
284 int chip_id;
285 int in_interrupt;
286 struct yellowfin_desc *rx_head_desc;
287 struct tx_status_words *tx_tail_desc;
288 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
289 unsigned int cur_tx, dirty_tx;
290 unsigned int rx_buf_sz; /* Based on MTU+slack. */
291 unsigned int tx_full:1; /* The Tx queue is full. */
292 unsigned int full_duplex:1; /* Full-duplex operation requested. */
293 unsigned int duplex_lock:1;
294 unsigned int medialock:1; /* Do not sense media. */
295 unsigned int default_port:4; /* Last dev->if_port value. */
296 /* MII transceiver section. */
297 int mii_cnt; /* MII device addresses. */
298 u16 advertising; /* NWay media advertisement */
299 unsigned char phys[2]; /* MII device addresses. */
300 u32 pad[4]; /* Used for 32-byte alignment */
303 #ifdef MODULE
305 #if LINUX_VERSION_CODE > 0x20115
306 MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
307 MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
308 MODULE_PARM(max_interrupt_work, "i");
309 MODULE_PARM(min_pci_latency, "i");
310 MODULE_PARM(mtu, "i");
311 MODULE_PARM(debug, "i");
312 MODULE_PARM(rx_copybreak, "i");
313 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
314 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
315 #endif
317 #endif
319 static struct net_device *yellowfin_probe1(struct net_device *dev, long ioaddr,
320 int irq, int chip_id, int options);
321 static int read_eeprom(long ioaddr, int location);
322 static int mdio_read(long ioaddr, int phy_id, int location);
323 static void mdio_write(long ioaddr, int phy_id, int location, int value);
324 #ifdef HAVE_PRIVATE_IOCTL
325 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
326 #endif
327 static int yellowfin_open(struct net_device *dev);
328 static void yellowfin_timer(unsigned long data);
329 static void yellowfin_tx_timeout(struct net_device *dev);
330 static void yellowfin_init_ring(struct net_device *dev);
331 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
332 static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
333 static int yellowfin_rx(struct net_device *dev);
334 static void yellowfin_error(struct net_device *dev, int intr_status);
335 static int yellowfin_close(struct net_device *dev);
336 static struct enet_statistics *yellowfin_get_stats(struct net_device *dev);
337 static void set_rx_mode(struct net_device *dev);
341 /* A list of all installed Yellowfin devices, for removing the driver module. */
342 static struct net_device *root_yellowfin_dev = NULL;
344 int yellowfin_probe(struct net_device *dev)
346 int cards_found = 0;
347 int pci_index = 0;
348 unsigned char pci_bus, pci_device_fn;
350 if ( ! pci_present())
351 return -ENODEV;
353 for (;pci_index < 0xff; pci_index++) {
354 u8 pci_latency;
355 u16 pci_command, new_command, vendor, device;
356 int chip_idx;
357 int irq;
358 long ioaddr;
360 if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
361 pci_index,
362 &pci_bus, &pci_device_fn)
363 != PCIBIOS_SUCCESSFUL)
364 break;
366 pcibios_read_config_word(pci_bus, pci_device_fn,
367 PCI_VENDOR_ID, &vendor);
368 pcibios_read_config_word(pci_bus, pci_device_fn,
369 PCI_DEVICE_ID, &device);
371 for (chip_idx = 0; chip_tbl[chip_idx].vendor_id; chip_idx++)
372 if (vendor == chip_tbl[chip_idx].vendor_id
373 && (device & chip_tbl[chip_idx].device_id_mask) ==
374 chip_tbl[chip_idx].device_id)
375 break;
376 if (chip_tbl[chip_idx].vendor_id == 0) /* Compiled out! */
377 continue;
380 struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
381 ioaddr = pdev->resource[0].start;
382 irq = pdev->irq;
385 if (yellowfin_debug > 2)
386 printk(KERN_INFO "Found %s at I/O %#lx, IRQ %d.\n",
387 chip_tbl[chip_idx].name, ioaddr, irq);
389 if (check_region(ioaddr, YELLOWFIN_TOTAL_SIZE))
390 continue;
392 pcibios_read_config_word(pci_bus, pci_device_fn,
393 PCI_COMMAND, &pci_command);
394 new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
395 if (pci_command != new_command) {
396 printk(KERN_INFO " The PCI BIOS has not enabled the"
397 " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
398 pci_bus, pci_device_fn, pci_command, new_command);
399 pcibios_write_config_word(pci_bus, pci_device_fn,
400 PCI_COMMAND, new_command);
403 dev = yellowfin_probe1(dev, ioaddr, irq, chip_idx, cards_found);
405 if (dev) {
406 /* Get and check the bus-master and latency values. */
407 pcibios_read_config_byte(pci_bus, pci_device_fn,
408 PCI_LATENCY_TIMER, &pci_latency);
409 if (pci_latency < min_pci_latency) {
410 printk(KERN_INFO " PCI latency timer (CFLT) is "
411 "unreasonably low at %d. Setting to %d clocks.\n",
412 pci_latency, min_pci_latency);
413 pcibios_write_config_byte(pci_bus, pci_device_fn,
414 PCI_LATENCY_TIMER, min_pci_latency);
415 } else if (yellowfin_debug > 1)
416 printk(KERN_INFO " PCI latency timer (CFLT) is %#x.\n",
417 pci_latency);
418 dev = 0;
419 cards_found++;
423 return cards_found ? 0 : -ENODEV;
426 static struct net_device *yellowfin_probe1(struct net_device *dev, long ioaddr,
427 int irq, int chip_id, int card_idx)
429 static int did_version = 0; /* Already printed version info. */
430 struct yellowfin_private *yp;
431 int option, i;
433 if (yellowfin_debug > 0 && did_version++ == 0)
434 printk(version);
436 dev = init_etherdev(dev, sizeof(struct yellowfin_private));
438 printk(KERN_INFO "%s: %s type %8x at 0x%lx, ",
439 dev->name, chip_tbl[chip_id].name, inl(ioaddr + ChipRev), ioaddr);
441 if (inw(ioaddr + ChipRev) == 0x0702)
442 for (i = 0; i < 6; i++)
443 dev->dev_addr[i] = inb(ioaddr + StnAddr + i);
444 else {
445 int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
446 for (i = 0; i < 6; i++)
447 dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
449 for (i = 0; i < 5; i++)
450 printk("%2.2x:", dev->dev_addr[i]);
451 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
453 /* Reset the chip. */
454 outl(0x80000000, ioaddr + DMACtrl);
456 /* We do a request_region() only to register /proc/ioports info. */
457 request_region(ioaddr, YELLOWFIN_TOTAL_SIZE, dev->name);
459 dev->base_addr = ioaddr;
460 dev->irq = irq;
462 /* Make certain the descriptor lists are aligned. */
463 yp = (void *)(((long)kmalloc(sizeof(*yp), GFP_KERNEL) + 31) & ~31);
464 memset(yp, 0, sizeof(*yp));
465 dev->priv = yp;
467 yp->next_module = root_yellowfin_dev;
468 root_yellowfin_dev = dev;
470 yp->chip_id = chip_id;
472 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
473 if (dev->mem_start)
474 option = dev->mem_start;
476 /* The lower four bits are the media type. */
477 if (option > 0) {
478 if (option & 0x200)
479 yp->full_duplex = 1;
480 yp->default_port = option & 15;
481 if (yp->default_port)
482 yp->medialock = 1;
484 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
485 yp->full_duplex = 1;
487 if (yp->full_duplex)
488 yp->duplex_lock = 1;
490 /* The Yellowfin-specific entries in the device structure. */
491 dev->open = &yellowfin_open;
492 dev->hard_start_xmit = &yellowfin_start_xmit;
493 dev->stop = &yellowfin_close;
494 dev->get_stats = &yellowfin_get_stats;
495 dev->set_multicast_list = &set_rx_mode;
496 #ifdef HAVE_PRIVATE_IOCTL
497 dev->do_ioctl = &mii_ioctl;
498 #endif
499 if (mtu)
500 dev->mtu = mtu;
502 if (chip_tbl[yp->chip_id].flags & HasMII) {
503 int phy, phy_idx = 0;
504 for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
505 int mii_status = mdio_read(ioaddr, phy, 1);
506 if (mii_status != 0xffff &&
507 mii_status != 0x0000) {
508 yp->phys[phy_idx++] = phy;
509 yp->advertising = mdio_read(ioaddr, phy, 4);
510 printk(KERN_INFO "%s: MII PHY found at address %d, status "
511 "0x%4.4x advertising %4.4x.\n",
512 dev->name, phy, mii_status, yp->advertising);
515 yp->mii_cnt = phy_idx;
518 return dev;
521 static int read_eeprom(long ioaddr, int location)
523 int bogus_cnt = 1000;
525 outb(location, ioaddr + EEAddr);
526 outb(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
527 while ((inb(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
528 ;
529 return inb(ioaddr + EERead);
532 /* MII Management Data I/O accesses.
533 These routines assume the MDIO controller is idle, and do not exit until
534 the command is finished. */
536 static int mdio_read(long ioaddr, int phy_id, int location)
538 int i;
540 outw((phy_id<<8) + location, ioaddr + MII_Addr);
541 outw(1, ioaddr + MII_Cmd);
542 for (i = 10000; i >= 0; i--)
543 if ((inw(ioaddr + MII_Status) & 1) == 0)
544 break;
545 return inw(ioaddr + MII_Rd_Data);
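/* Usage sketch (illustrative only, mirroring yellowfin_timer() below): reading
 * the MII status register (register 1) of the first detected transceiver:
 *
 *	int bmsr = mdio_read(dev->base_addr, yp->phys[0], 1);
 *	if (bmsr & 0x0004)
 *		... link is up ...
 */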
548 static void mdio_write(long ioaddr, int phy_id, int location, int value)
550 int i;
552 outw((phy_id<<8) + location, ioaddr + MII_Addr);
553 outw(value, ioaddr + MII_Wr_Data);
555 /* Wait for the command to finish. */
556 for (i = 10000; i >= 0; i--)
557 if ((inw(ioaddr + MII_Status) & 1) == 0)
558 break;
559 return;
563 static int yellowfin_open(struct net_device *dev)
565 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
566 long ioaddr = dev->base_addr;
567 int i;
569 /* Reset the chip. */
570 outl(0x80000000, ioaddr + DMACtrl);
572 if (request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev))
573 return -EAGAIN;
575 if (yellowfin_debug > 1)
576 printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
577 dev->name, dev->irq);
579 MOD_INC_USE_COUNT;
581 yellowfin_init_ring(dev);
583 outl(virt_to_bus(yp->rx_ring), ioaddr + RxPtr);
584 outl(virt_to_bus(yp->tx_ring), ioaddr + TxPtr);
586 for (i = 0; i < 6; i++)
587 outb(dev->dev_addr[i], ioaddr + StnAddr + i);
589 /* Set up various condition 'select' registers.
590 There are no options here. */
591 outl(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
592 outl(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
593 outl(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
594 outl(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
595 outl(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
596 outl(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
598 /* Initialize other registers: with so many, this will eventually be
599 converted to an offset/value list. */
600 outl(dma_ctrl, ioaddr + DMACtrl);
601 outw(fifo_cfg, ioaddr + FIFOcfg);
602 /* Enable automatic generation of flow control frames, period 0xffff. */
603 outl(0x0030FFFF, ioaddr + FlowCtrl);
605 if (dev->if_port == 0)
606 dev->if_port = yp->default_port;
608 dev->tbusy = 0;
609 dev->interrupt = 0;
610 yp->in_interrupt = 0;
612 /* Setting the Rx mode will start the Rx process. */
613 if (yp->chip_id == 0) {
614 /* We are always in full-duplex mode with gigabit! */
615 yp->full_duplex = 1;
616 outw(0x01CF, ioaddr + Cnfg);
617 } else {
618 outw(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
619 outw(0x1018, ioaddr + FrameGap1);
620 outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
622 set_rx_mode(dev);
624 dev->start = 1;
626 /* Enable interrupts by setting the interrupt mask. */
627 outw(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
628 outw(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
629 outl(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
630 outl(0x80008000, ioaddr + TxCtrl);
632 if (yellowfin_debug > 2) {
633 printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
634 dev->name);
636 /* Set the timer to check for link beat. */
637 init_timer(&yp->timer);
638 yp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
639 yp->timer.data = (unsigned long)dev;
640 yp->timer.function = &yellowfin_timer; /* timer handler */
641 add_timer(&yp->timer);
643 return 0;
646 static void yellowfin_timer(unsigned long data)
648 struct net_device *dev = (struct net_device *)data;
649 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
650 long ioaddr = dev->base_addr;
651 int next_tick = 0;
653 if (yellowfin_debug > 3) {
654 printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
655 dev->name, inw(ioaddr + IntrStatus));
658 if (yp->mii_cnt) {
659 int mii_reg1 = mdio_read(ioaddr, yp->phys[0], 1);
660 int mii_reg5 = mdio_read(ioaddr, yp->phys[0], 5);
661 int negotiated = mii_reg5 & yp->advertising;
662 if (yellowfin_debug > 1)
663 printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
664 "link partner capability %4.4x.\n",
665 dev->name, yp->phys[0], mii_reg1, mii_reg5);
667 if ( ! yp->duplex_lock &&
668 ((negotiated & 0x0300) == 0x0100
669 || (negotiated & 0x00C0) == 0x0040)) {
670 yp->full_duplex = 1;
672 outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
674 if (mii_reg1 & 0x0004)
675 next_tick = 60*HZ;
676 else
677 next_tick = 3*HZ;
680 if (next_tick) {
681 yp->timer.expires = RUN_AT(next_tick);
682 add_timer(&yp->timer);
686 static void yellowfin_tx_timeout(struct net_device *dev)
688 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
689 long ioaddr = dev->base_addr;
691 printk(KERN_WARNING "%s: Yellowfin transmit timed out, status %8.8x, resetting...\n",
692 dev->name, inl(ioaddr));
694 #ifndef __alpha__
696 int i;
697 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)yp->rx_ring);
698 for (i = 0; i < RX_RING_SIZE; i++)
699 printk(" %8.8x", (unsigned int)yp->rx_ring[i].status);
700 printk("\n"KERN_DEBUG" Tx ring %8.8x: ", (int)yp->tx_ring);
701 for (i = 0; i < TX_RING_SIZE; i++)
702 printk(" %4.4x /%4.4x", yp->tx_status[i].tx_errs, yp->tx_ring[i].status);
703 printk("\n");
705 #endif
707 /* Perhaps we should reinitialize the hardware here. */
708 dev->if_port = 0;
709 /* Stop and restart the chip's Tx processes. */
711 /* Trigger an immediate transmit demand. */
713 dev->trans_start = jiffies;
714 yp->stats.tx_errors++;
715 return;
719 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
720 static void yellowfin_init_ring(struct net_device *dev)
722 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
723 int i;
725 yp->tx_full = 0;
726 yp->cur_rx = yp->cur_tx = 0;
727 yp->dirty_rx = yp->dirty_tx = 0;
729 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
730 yp->rx_head_desc = &yp->rx_ring[0];
732 for (i = 0; i < RX_RING_SIZE; i++) {
733 struct sk_buff *skb;
735 yp->rx_ring[i].request_cnt = yp->rx_buf_sz;
736 yp->rx_ring[i].cmd = CMD_RX_BUF | INTR_ALWAYS;
738 skb = dev_alloc_skb(yp->rx_buf_sz);
739 yp->rx_skbuff[i] = skb;
740 if (skb) {
741 skb->dev = dev; /* Mark as being used by this device. */
742 skb_reserve(skb, 2); /* 16 byte align the IP header. */
743 yp->rx_ring[i].addr = virt_to_bus(skb->tail);
744 } else if (yp->dirty_rx == 0)
745 yp->dirty_rx = (unsigned int)(0 - RX_RING_SIZE);
746 yp->rx_ring[i].branch_addr = virt_to_bus(&yp->rx_ring[i+1]);
748 /* Mark the last entry as wrapping the ring. */
749 yp->rx_ring[i-1].cmd = CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS;
750 yp->rx_ring[i-1].branch_addr = virt_to_bus(&yp->rx_ring[0]);
752 /*#define NO_TXSTATS*/
753 #ifdef NO_TXSTATS
754 /* In this mode the Tx ring needs only a single descriptor. */
755 for (i = 0; i < TX_RING_SIZE; i++) {
756 yp->tx_skbuff[i] = 0;
757 yp->tx_ring[i].cmd = CMD_STOP;
758 yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
760 yp->tx_ring[--i].cmd = CMD_STOP | BRANCH_ALWAYS; /* Wrap ring */
761 yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[0]);
762 #else
763 /* Tx ring needs a pair of descriptors, the second for the status. */
764 for (i = 0; i < TX_RING_SIZE*2; i++) {
765 yp->tx_skbuff[i/2] = 0;
766 yp->tx_ring[i].cmd = CMD_STOP; /* Branch on Tx error. */
767 yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
768 i++;
769 if (chip_tbl[yp->chip_id].flags & FullTxStatus) {
770 yp->tx_ring[i].cmd = CMD_TXSTATUS;
771 yp->tx_ring[i].request_cnt = sizeof(yp->tx_status[i]);
772 yp->tx_ring[i].addr = virt_to_bus(&yp->tx_status[i/2]);
773 } else { /* Symbios chips write only tx_errs word. */
774 yp->tx_ring[i].cmd = CMD_TXSTATUS | INTR_ALWAYS;
775 yp->tx_ring[i].request_cnt = 2;
776 yp->tx_ring[i].addr = virt_to_bus(&yp->tx_status[i/2].tx_errs);
778 yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
780 /* Wrap ring */
781 yp->tx_ring[--i].cmd = CMD_TXSTATUS | BRANCH_ALWAYS | INTR_ALWAYS;
782 yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[0]);
783 #endif
784 yp->tx_tail_desc = &yp->tx_status[0];
785 return;
788 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
790 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
791 unsigned entry;
793 /* Block a timer-based transmit from overlapping. This could better be
794 done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
795 if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
796 if (jiffies - dev->trans_start < TX_TIMEOUT)
797 return 1;
798 yellowfin_tx_timeout(dev);
799 return 1;
802 /* Caution: the write order is important here, set the base address
803 with the "ownership" bits last. */
805 /* Calculate the next Tx descriptor entry. */
806 entry = yp->cur_tx % TX_RING_SIZE;
808 yp->tx_skbuff[entry] = skb;
810 #ifdef NO_TXSTATS
811 yp->tx_ring[entry].request_cnt = skb->len;
812 yp->tx_ring[entry].addr = virt_to_bus(skb->data);
813 yp->tx_ring[entry].status = 0;
814 if (entry >= TX_RING_SIZE-1) {
815 yp->tx_ring[0].cmd = CMD_STOP; /* New stop command. */
816 yp->tx_ring[TX_RING_SIZE-1].cmd = CMD_TX_PKT | BRANCH_ALWAYS;
817 } else {
818 yp->tx_ring[entry+1].cmd = CMD_STOP; /* New stop command. */
819 yp->tx_ring[entry].cmd = CMD_TX_PKT | BRANCH_IFTRUE;
821 yp->cur_tx++;
822 #else
823 yp->tx_ring[entry<<1].request_cnt = skb->len;
824 yp->tx_ring[entry<<1].addr = virt_to_bus(skb->data);
825 /* The input_last (status-write) command is constant, but we must rewrite
826 the subsequent 'stop' command. */
828 yp->cur_tx++;
830 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
831 yp->tx_ring[next_entry<<1].cmd = CMD_STOP;
833 /* Final step -- overwrite the old 'stop' command. */
835 yp->tx_ring[entry<<1].cmd =
836 (entry % 6) == 0 ? CMD_TX_PKT | INTR_ALWAYS | BRANCH_IFTRUE :
837 CMD_TX_PKT | BRANCH_IFTRUE;
838 #endif
840 /* Non-x86 Todo: explicitly flush cache lines here. */
842 /* Wake the potentially-idle transmit channel. */
843 outl(0x10001000, dev->base_addr + TxCtrl);
845 if (yp->cur_tx - yp->dirty_tx < TX_RING_SIZE - 1)
846 clear_bit(0, (void*)&dev->tbusy); /* Typical path */
847 else
848 yp->tx_full = 1;
849 dev->trans_start = jiffies;
851 if (yellowfin_debug > 4) {
852 printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
853 dev->name, yp->cur_tx, entry);
855 return 0;
858 /* The interrupt handler does all of the Rx thread work and cleans up
859 after the Tx thread. */
860 static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
862 struct net_device *dev = (struct net_device *)dev_instance;
863 struct yellowfin_private *yp;
864 long ioaddr, boguscnt = max_interrupt_work;
866 #ifndef final_version /* Can never occur. */
867 if (dev == NULL) {
868 printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
869 return;
871 #endif
873 ioaddr = dev->base_addr;
874 yp = (struct yellowfin_private *)dev->priv;
875 if (test_and_set_bit(0, (void*)&yp->in_interrupt)) {
876 dev->interrupt = 1;
877 printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
878 return;
881 do {
882 u16 intr_status = inw(ioaddr + IntrClear);
884 if (yellowfin_debug > 4)
885 printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
886 dev->name, intr_status);
888 if (intr_status == 0)
889 break;
891 if (intr_status & (IntrRxDone | IntrEarlyRx)) {
892 yellowfin_rx(dev);
893 outl(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
896 #ifdef NO_TXSTATS
897 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
898 int entry = yp->dirty_tx % TX_RING_SIZE;
899 if (yp->tx_ring[entry].status == 0)
900 break;
901 /* Free the original skb. */
902 DEV_FREE_SKB(yp->tx_skbuff[entry]);
903 yp->tx_skbuff[entry] = 0;
904 yp->stats.tx_packets++;
906 if (yp->tx_full && dev->tbusy
907 && yp->cur_tx - yp->dirty_tx < TX_RING_SIZE - 4) {
908 /* The ring is no longer full, clear tbusy. */
909 yp->tx_full = 0;
910 clear_bit(0, (void*)&dev->tbusy);
911 mark_bh(NET_BH);
913 #else
914 if (intr_status & IntrTxDone
915 || yp->tx_tail_desc->tx_errs) {
916 unsigned dirty_tx = yp->dirty_tx;
918 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
919 dirty_tx++) {
920 /* Todo: optimize this. */
921 int entry = dirty_tx % TX_RING_SIZE;
922 u16 tx_errs = yp->tx_status[entry].tx_errs;
924 #ifndef final_version
925 if (yellowfin_debug > 5)
926 printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
927 "%4.4x %4.4x %4.4x %4.4x.\n",
928 dev->name, entry,
929 yp->tx_status[entry].tx_cnt,
930 yp->tx_status[entry].tx_errs,
931 yp->tx_status[entry].total_tx_cnt,
932 yp->tx_status[entry].paused);
933 #endif
934 if (tx_errs == 0)
935 break; /* It still hasn't been Txed */
936 if (tx_errs & 0xF8100000) {
937 /* There was a major error, log it. */
938 #ifndef final_version
939 if (yellowfin_debug > 1)
940 printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
941 dev->name, tx_errs);
942 #endif
943 yp->stats.tx_errors++;
944 if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
945 if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
946 if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
947 if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
948 #ifdef ETHER_STATS
949 if (tx_errs & 0x1000) yp->stats.collisions16++;
950 #endif
951 } else {
952 #ifndef final_version
953 if (yellowfin_debug > 4)
954 printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
955 dev->name, tx_errs);
956 #endif
957 #ifdef ETHER_STATS
958 if (tx_errs & 0x0400) yp->stats.tx_deferred++;
959 #endif
960 yp->stats.collisions += tx_errs & 15;
961 yp->stats.tx_packets++;
964 /* Free the original skb. */
965 DEV_FREE_SKB(yp->tx_skbuff[entry]);
966 yp->tx_skbuff[entry] = 0;
967 /* Mark status as empty. */
968 yp->tx_status[entry].tx_errs = 0;
971 #ifndef final_version
972 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
973 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
974 dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
975 dirty_tx += TX_RING_SIZE;
977 #endif
979 if (yp->tx_full && dev->tbusy
980 && yp->cur_tx - dirty_tx < TX_RING_SIZE - 2) {
981 /* The ring is no longer full, clear tbusy. */
982 yp->tx_full = 0;
983 clear_bit(0, (void*)&dev->tbusy);
984 mark_bh(NET_BH);
987 yp->dirty_tx = dirty_tx;
988 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
990 #endif
992 /* Log errors and other uncommon events. */
993 if (intr_status & 0x2ee) /* Abnormal error summary. */
994 yellowfin_error(dev, intr_status);
996 if (--boguscnt < 0) {
997 printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n",
998 dev->name, intr_status);
999 break;
1001 } while (1);
1003 if (yellowfin_debug > 3)
1004 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1005 dev->name, inw(ioaddr + IntrStatus));
1007 /* Code that should never be run! Perhaps remove after testing.. */
1009 static int stopit = 10;
1010 if (dev->start == 0 && --stopit < 0) {
1011 printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
1012 dev->name);
1013 free_irq(irq, dev);
1017 dev->interrupt = 0;
1018 clear_bit(0, (void*)&yp->in_interrupt);
1019 return;
1022 /* This routine is logically part of the interrupt handler, but separated
1023 for clarity and better register allocation. */
1024 static int yellowfin_rx(struct net_device *dev)
1026 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1027 int entry = yp->cur_rx % RX_RING_SIZE;
1028 int boguscnt = 20;
1030 if (yellowfin_debug > 4) {
1031 printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %4.4x.\n",
1032 entry, yp->rx_ring[entry].status);
1033 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x %4.4x %4.4x.\n",
1034 entry, yp->rx_ring[entry].cmd,
1035 yp->rx_ring[entry].request_cnt, yp->rx_ring[entry].addr,
1036 yp->rx_ring[entry].result_cnt, yp->rx_ring[entry].status);
1039 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1040 while (yp->rx_head_desc->status) {
1041 struct yellowfin_desc *desc = yp->rx_head_desc;
1042 u16 desc_status = desc->status;
1043 int data_size = desc->request_cnt - desc->result_cnt;
1044 u8 *buf_addr = bus_to_virt(desc->addr);
1045 s16 frame_status = get_unaligned((s16*)(buf_addr+data_size-2));
1047 if (yellowfin_debug > 4)
1048 printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
1049 frame_status);
1050 if (--boguscnt < 0)
1051 break;
1052 if ( ! (desc_status & RX_EOP)) {
1053 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
1054 " status %4.4x!\n", dev->name, desc_status);
1055 yp->stats.rx_length_errors++;
1056 } else if (yp->chip_id == 0 && (frame_status & 0x0038)) {
1057 /* There was an error. */
1058 if (yellowfin_debug > 3)
1059 printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
1060 frame_status);
1061 yp->stats.rx_errors++;
1062 if (frame_status & 0x0060) yp->stats.rx_length_errors++;
1063 if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
1064 if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
1065 if (frame_status < 0) yp->stats.rx_dropped++;
1066 } else if (yp->chip_id != 0 &&
1067 ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1068 u8 status1 = buf_addr[data_size-2];
1069 u8 status2 = buf_addr[data_size-1];
1070 yp->stats.rx_errors++;
1071 if (status1 & 0xC0) yp->stats.rx_length_errors++;
1072 if (status2 & 0x03) yp->stats.rx_frame_errors++;
1073 if (status2 & 0x04) yp->stats.rx_crc_errors++;
1074 if (status2 & 0x80) yp->stats.rx_dropped++;
1075 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
1076 } else if (memcmp(bus_to_virt(yp->rx_ring[entry].addr),
1077 dev->dev_addr, 6) != 0
1078 && memcmp(bus_to_virt(yp->rx_ring[entry].addr),
1079 "\377\377\377\377\377\377", 6) != 0) {
1080 printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x.\n",
1081 dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
1082 buf_addr[3], buf_addr[4], buf_addr[5]);
1083 bogus_rx++;
1084 #endif
1085 } else {
1086 struct sk_buff *skb;
1087 int pkt_len = data_size -
1088 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1089 /* To verify: Yellowfin Length should omit the CRC! */
1091 #ifndef final_version
1092 if (yellowfin_debug > 4)
1093 printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
1094 " of %d, bogus_cnt %d.\n",
1095 pkt_len, data_size, boguscnt);
1096 #endif
1097 /* Check if the packet is long enough to just pass up the skbuff
1098 without copying to a properly sized skbuff. */
1099 if (pkt_len > rx_copybreak) {
1100 char *temp = skb_put(skb = yp->rx_skbuff[entry], pkt_len);
1101 #ifndef final_version /* Remove after testing. */
1102 if (bus_to_virt(yp->rx_ring[entry].addr) != temp)
1103 printk(KERN_WARNING "%s: Warning -- the skbuff addresses "
1104 "do not match in yellowfin_rx: %p vs. %p / %p.\n",
1105 dev->name, bus_to_virt(yp->rx_ring[entry].addr),
1106 skb->head, temp);
1107 #endif
1108 yp->rx_skbuff[entry] = NULL;
1109 } else {
1110 skb = dev_alloc_skb(pkt_len + 2);
1111 if (skb == NULL)
1112 break;
1113 skb->dev = dev;
1114 skb_reserve(skb, 2); /* 16 byte align the data fields */
1115 #if 1
1116 eth_copy_and_sum(skb, bus_to_virt(yp->rx_ring[entry].addr),
1117 pkt_len, 0);
1118 skb_put(skb, pkt_len);
1119 #else
1120 memcpy(skb_put(skb, pkt_len),
1121 bus_to_virt(yp->rx_ring[entry].addr), pkt_len);
1122 #endif
1124 skb->protocol = eth_type_trans(skb, dev);
1125 netif_rx(skb);
1126 dev->last_rx = jiffies;
1127 yp->stats.rx_packets++;
1129 entry = (++yp->cur_rx) % RX_RING_SIZE;
1130 yp->rx_head_desc = &yp->rx_ring[entry];
1133 /* Refill the Rx ring buffers. */
1134 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1135 struct sk_buff *skb;
1136 entry = yp->dirty_rx % RX_RING_SIZE;
1137 if (yp->rx_skbuff[entry] == NULL) {
1138 skb = dev_alloc_skb(yp->rx_buf_sz);
1139 if (skb == NULL)
1140 break; /* Better luck next round. */
1141 skb->dev = dev; /* Mark as being used by this device. */
1142 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1143 yp->rx_ring[entry].addr = virt_to_bus(skb->tail);
1144 yp->rx_skbuff[entry] = skb;
1146 yp->rx_ring[entry].cmd = CMD_STOP;
1147 yp->rx_ring[entry].status = 0; /* Clear complete bit. */
1148 if (entry != 0)
1149 yp->rx_ring[entry - 1].cmd = CMD_RX_BUF | INTR_ALWAYS;
1150 else
1151 yp->rx_ring[RX_RING_SIZE - 1].cmd =
1152 CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS;
1155 return 0;
1158 static void yellowfin_error(struct net_device *dev, int intr_status)
1160 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1162 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1163 dev->name, intr_status);
1164 /* Hmmmmm, it's not clear what to do here. */
1165 if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1166 yp->stats.tx_errors++;
1167 if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1168 yp->stats.rx_errors++;
1171 static int yellowfin_close(struct net_device *dev)
1173 long ioaddr = dev->base_addr;
1174 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1175 int i;
1177 dev->start = 0;
1178 dev->tbusy = 1;
1180 if (yellowfin_debug > 1) {
1181 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n",
1182 dev->name, inw(ioaddr + TxStatus),
1183 inw(ioaddr + RxStatus), inw(ioaddr + IntrStatus));
1184 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1185 dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
1188 /* Disable interrupts by clearing the interrupt mask. */
1189 outw(0x0000, ioaddr + IntrEnb);
1191 /* Stop the chip's Tx and Rx processes. */
1192 outl(0x80000000, ioaddr + RxCtrl);
1193 outl(0x80000000, ioaddr + TxCtrl);
1195 del_timer(&yp->timer);
1197 #ifdef __i386__
1198 if (yellowfin_debug > 2) {
1199 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n", (int)virt_to_bus(yp->tx_ring));
1200 for (i = 0; i < TX_RING_SIZE*2; i++)
1201 printk(" %c #%d desc. %4.4x %4.4x %8.8x %8.8x %4.4x %4.4x.\n",
1202 inl(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1203 i, yp->tx_ring[i].cmd,
1204 yp->tx_ring[i].request_cnt, yp->tx_ring[i].addr,
1205 yp->tx_ring[i].branch_addr,
1206 yp->tx_ring[i].result_cnt, yp->tx_ring[i].status);
1207 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1208 for (i = 0; i < TX_RING_SIZE; i++)
1209 printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n",
1210 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1211 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1213 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", (int)virt_to_bus(yp->rx_ring));
1214 for (i = 0; i < RX_RING_SIZE; i++) {
1215 printk(KERN_DEBUG " %c #%d desc. %4.4x %4.4x %8.8x %4.4x %4.4x\n",
1216 inl(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1217 i, yp->rx_ring[i].cmd,
1218 yp->rx_ring[i].request_cnt, yp->rx_ring[i].addr,
1219 yp->rx_ring[i].result_cnt, yp->rx_ring[i].status);
1220 if (yellowfin_debug > 6) {
1221 if (*(u8*)yp->rx_ring[i].addr != 0x69) {
1222 int j;
1223 for (j = 0; j < 0x50; j++)
1224 printk(" %4.4x", ((u16*)yp->rx_ring[i].addr)[j]);
1225 printk("\n");
1230 #endif /* __i386__ debugging only */
1232 free_irq(dev->irq, dev);
1234 /* Free all the skbuffs in the Rx queue. */
1235 for (i = 0; i < RX_RING_SIZE; i++) {
1236 yp->rx_ring[i].cmd = CMD_STOP;
1237 yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
1238 if (yp->rx_skbuff[i]) {
1239 #if LINUX_VERSION_CODE < 0x20100
1240 yp->rx_skbuff[i]->free = 1;
1241 #endif
1242 DEV_FREE_SKB(yp->rx_skbuff[i]);
1244 yp->rx_skbuff[i] = 0;
1246 for (i = 0; i < TX_RING_SIZE; i++) {
1247 if (yp->tx_skbuff[i])
1248 DEV_FREE_SKB(yp->tx_skbuff[i]);
1249 yp->tx_skbuff[i] = 0;
1252 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
1253 if (yellowfin_debug > 0) {
1254 printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
1255 dev->name, bogus_rx);
1257 #endif
1258 MOD_DEC_USE_COUNT;
1260 return 0;
1263 static struct enet_statistics *yellowfin_get_stats(struct net_device *dev)
1265 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1266 return &yp->stats;
1269 /* Set or clear the multicast filter for this adaptor. */
1271 /* The little-endian AUTODIN32 ethernet CRC calculation.
1272 N.B. Do not use for bulk data, use a table-based routine instead.
1273 This is common code and should be moved to net/core/crc.c */
1274 static unsigned const ethernet_polynomial_le = 0xedb88320U;
1276 static inline unsigned ether_crc_le(int length, unsigned char *data)
1278 unsigned int crc = 0xffffffff; /* Initial value. */
1279 while(--length >= 0) {
1280 unsigned char current_octet = *data++;
1281 int bit;
1282 for (bit = 8; --bit >= 0; current_octet >>= 1) {
1283 if ((crc ^ current_octet) & 1) {
1284 crc >>= 1;
1285 crc ^= ethernet_polynomial_le;
1286 } else
1287 crc >>= 1;
1290 return crc;
1294 static void set_rx_mode(struct net_device *dev)
1296 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1297 long ioaddr = dev->base_addr;
1298 u16 cfg_value = inw(ioaddr + Cnfg);
1300 /* Stop the Rx process to change any value. */
1301 outw(cfg_value & ~0x1000, ioaddr + Cnfg);
1302 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1303 /* Unconditionally log net taps. */
1304 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1305 outw(0x000F, ioaddr + AddrMode);
1306 } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
1307 /* Too many to filter well, or accept all multicasts. */
1308 outw(0x000B, ioaddr + AddrMode);
1309 } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
1310 struct dev_mc_list *mclist;
1311 u16 hash_table[4];
1312 int i;
1313 memset(hash_table, 0, sizeof(hash_table));
1314 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1315 i++, mclist = mclist->next) {
1316 /* Due to a bug in the early chip versions, multiple filter
1317 slots must be set for each address. */
1318 if (yp->chip_id == 0) {
1319 set_bit((ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f,
1320 hash_table);
1321 set_bit((ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f,
1322 hash_table);
1323 set_bit((ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f,
1324 hash_table);
1326 set_bit((ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f,
1327 hash_table);
1329 /* Copy the hash table to the chip. */
1330 for (i = 0; i < 4; i++)
1331 outw(hash_table[i], ioaddr + HashTbl + i*2);
1332 outw(0x0003, ioaddr + AddrMode);
1333 } else { /* Normal, unicast/broadcast-only mode. */
1334 outw(0x0001, ioaddr + AddrMode);
1336 /* Restart the Rx process. */
1337 outw(cfg_value | 0x1000, ioaddr + Cnfg);
1340 #ifdef HAVE_PRIVATE_IOCTL
1341 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1343 long ioaddr = dev->base_addr;
1344 u16 *data = (u16 *)&rq->ifr_data;
1346 switch(cmd) {
1347 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
1348 data[0] = ((struct yellowfin_private *)dev->priv)->phys[0] & 0x1f;
1349 /* Fall Through */
1350 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
1351 data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
1352 return 0;
1353 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
1354 if (!suser())
1355 return -EPERM;
1356 mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
1357 return 0;
1358 default:
1359 return -EOPNOTSUPP;
1362 #endif /* HAVE_PRIVATE_IOCTL */
1365 #ifdef MODULE
1367 /* An additional parameter that may be passed in... */
1368 static int debug = -1;
1370 int init_module(void)
1372 if (debug >= 0)
1373 yellowfin_debug = debug;
1375 return yellowfin_probe(0);
1378 void cleanup_module(void)
1380 struct net_device *next_dev;
1382 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
1383 while (root_yellowfin_dev) {
1384 next_dev = ((struct yellowfin_private *)root_yellowfin_dev->priv)->next_module;
1385 unregister_netdev(root_yellowfin_dev);
1386 release_region(root_yellowfin_dev->base_addr, YELLOWFIN_TOTAL_SIZE);
1387 kfree(root_yellowfin_dev);
1388 root_yellowfin_dev = next_dev;
1392 #endif /* MODULE */
1395 * Local variables:
1396 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1397 * compile-command-alphaLX: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O2 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS` -fomit-frame-pointer -fno-strength-reduce -mno-fp-regs -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED"
1398 * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1399 * c-indent-level: 4
1400 * c-basic-offset: 4
1401 * tab-width: 4
1402 * End: