/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */
#ifdef MODULE
#include <linux/module.h>
#include <linux/version.h>
#endif

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include "mace.h"

#ifdef MODULE
static struct net_device *mace_devs = NULL;
#endif

#define N_RX_RING	8
#define N_TX_RING	6
#define MAX_TX_ACTIVE	1
#define NCMDS_TX	1	/* dma commands per element in tx ring */
#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
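/* The extra 8 bytes leave room for the 4-byte FCS plus what appears to
   be the MACE's 4-byte receive status trailer (see mace_rxdma_intr). */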
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

struct mace_data {
    volatile struct mace *mace;
    volatile struct dbdma_regs *tx_dma;
    int tx_dma_intr;
    volatile struct dbdma_regs *rx_dma;
    int rx_dma_intr;
    volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
    volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
    struct sk_buff *rx_bufs[N_RX_RING];
    int rx_fill;
    int rx_empty;
    struct sk_buff *tx_bufs[N_TX_RING];
    int tx_fill;
    int tx_empty;
    unsigned char maccc;
    unsigned char tx_fullup;
    unsigned char tx_active;
    unsigned char tx_bad_runt;
    struct net_device_stats stats;
    struct timer_list tx_timeout;
    int timeout_active;
};

/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
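
/* For reference: a dbdma_cmd is 16 bytes, so with N_RX_RING = 8 and
   NCMDS_TX * N_TX_RING = 6 the command area adds (8 + 6 + 3) * 16 =
   272 bytes on top of sizeof(struct mace_data). */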

static int bitrev(int);
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *mace_stats(struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static void mace_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(unsigned long data);
static inline void dbdma_reset(volatile struct dbdma_regs *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */
static unsigned char dummy_buf[RX_BUFLEN+2];

/* Bit-reverse one byte of an ethernet hardware address. */
static inline int
bitrev(int b)
{
    int d = 0, i;

    for (i = 0; i < 8; ++i, b >>= 1)
	d = (d << 1) | (b & 1);
    return d;
}
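
/* For example, bitrev(0x02) == 0x40 and bitrev(0xA0) == 0x05. */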

static int
mace_probe(struct net_device *dev)
{
    int j, rev;
    struct mace_data *mp;
    struct device_node *mace;
    unsigned char *addr;
    static int maces_found = 0;
    static struct device_node *next_mace;

    if (!maces_found) {
	next_mace = find_devices("mace");
	maces_found = 1;
    }
    mace = next_mace;
    if (mace == 0)
	return -ENODEV;
    next_mace = mace->next;

    if (mace->n_addrs != 3 || mace->n_intrs != 3) {
	printk(KERN_ERR "can't use MACE %s: expect 3 addrs and 3 intrs\n",
	       mace->full_name);
	return -ENODEV;
    }

    if (dev == NULL)
	dev = init_etherdev(0, PRIV_BYTES);
    else {
	dev->priv = kmalloc(PRIV_BYTES, GFP_KERNEL);
	if (dev->priv == 0)
	    return -ENOMEM;
    }
    memset(dev->priv, 0, PRIV_BYTES);

    mp = (struct mace_data *) dev->priv;
    dev->base_addr = mace->addrs[0].address;
    mp->mace = (volatile struct mace *)
	ioremap(mace->addrs[0].address, 0x1000);
    dev->irq = mace->intrs[0].line;

    addr = get_property(mace, "mac-address", NULL);
    if (addr == NULL) {
	addr = get_property(mace, "local-mac-address", NULL);
	if (addr == NULL) {
	    printk(KERN_ERR "Can't get mac-address for MACE at %lx\n",
		   dev->base_addr);
	    return -EAGAIN;
	}
    }
    printk(KERN_INFO "%s: MACE at", dev->name);
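    /* An address beginning 00:A0 is apparently stored bit-reversed:
       reversing each byte of 00:A0:40:... yields Apple's 00:05:02 OUI. */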
    rev = addr[0] == 0 && addr[1] == 0xA0;
    for (j = 0; j < 6; ++j) {
	dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
	printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
    }
    printk(", chip revision %d.%d\n",
	   in_8(&mp->mace->chipid_hi), in_8(&mp->mace->chipid_lo));

    mp = (struct mace_data *) dev->priv;
    mp->maccc = ENXMT | ENRCV;
    mp->tx_dma = (volatile struct dbdma_regs *)
	ioremap(mace->addrs[1].address, 0x1000);
    mp->tx_dma_intr = mace->intrs[1].line;
    mp->rx_dma = (volatile struct dbdma_regs *)
	ioremap(mace->addrs[2].address, 0x1000);
    mp->rx_dma_intr = mace->intrs[2].line;

    mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
    mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

    memset(&mp->stats, 0, sizeof(mp->stats));
    memset((char *) mp->tx_cmds, 0,
	   (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
    init_timer(&mp->tx_timeout);
    mp->timeout_active = 0;

    dev->open = mace_open;
    dev->stop = mace_close;
    dev->hard_start_xmit = mace_xmit_start;
    dev->get_stats = mace_stats;
    dev->set_multicast_list = mace_set_multicast;
    dev->set_mac_address = mace_set_address;

    ether_setup(dev);

    mace_reset(dev);

    if (request_irq(dev->irq, mace_interrupt, 0, "MACE", dev)) {
	printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
	return -EAGAIN;
    }
    if (request_irq(mace->intrs[1].line, mace_txdma_intr, 0, "MACE-txdma",
		    dev)) {
	printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[1].line);
	return -EAGAIN;
    }
    if (request_irq(mace->intrs[2].line, mace_rxdma_intr, 0, "MACE-rxdma",
		    dev)) {
	printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[2].line);
	return -EAGAIN;
    }

#ifdef MODULE
    mace_devs = dev;	/* so cleanup_module() can find the device */
#endif
    return 0;
}

static void dbdma_reset(volatile struct dbdma_regs *dma)
{
    int i;

    out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

    /*
     * Yes this looks peculiar, but apparently it needs to be this
     * way on some machines.
     */
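    /* Poll the RUN bit, delaying 1us per iteration while it is still
       set, i.e. wait up to ~200us for the channel to stop. */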
    for (i = 200; i > 0; --i)
	if (ld_le32(&dma->control) & RUN)
	    udelay(1);
}

static void mace_reset(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    int i;

    /* soft-reset the chip */
    i = 200;
    while (--i) {
	out_8(&mb->biucc, SWRST);
	if (in_8(&mb->biucc) & SWRST) {
	    udelay(10);
	    continue;
	}
	break;
    }
    if (!i) {
	printk(KERN_ERR "mace: cannot reset chip!\n");
	return;
    }

    out_8(&mb->imr, 0xff);	/* disable all intrs for now */
    i = in_8(&mb->ir);
    out_8(&mb->maccc, 0);	/* turn off tx, rx */

    out_8(&mb->biucc, XMTSP_64);
    out_8(&mb->utr, RTRD);
    out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
    out_8(&mb->xmtfc, AUTO_PAD_XMIT);	/* auto-pad short frames */
    out_8(&mb->rcvfc, 0);

    /* load up the hardware address */
    __mace_set_address(dev, dev->dev_addr);

    /* clear the multicast filter */
    out_8(&mb->iac, ADDRCHG | LOGADDR);
    while ((in_8(&mb->iac) & ADDRCHG) != 0)
	;
    for (i = 0; i < 8; ++i) {
	out_8(&mb->ladrf, 0);
    }
    /* done changing address */
    out_8(&mb->iac, 0);
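
    /* PORTSEL_GPSI + ENPLSIO presumably selects the GPSI port and
       enables the PLS I/O pins for the external transceiver. */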
    out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}

static void __mace_set_address(struct net_device *dev, void *addr)
{
    volatile struct mace *mb = ((struct mace_data *) dev->priv)->mace;
    unsigned char *p = addr;
    int i;

    /* load up the hardware address */
    out_8(&mb->iac, ADDRCHG | PHYADDR);
    while ((in_8(&mb->iac) & ADDRCHG) != 0)
	;
    for (i = 0; i < 6; ++i)
	out_8(&mb->padr, dev->dev_addr[i] = p[i]);
}

static int mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    unsigned long flags;

    save_flags(flags); cli();

    __mace_set_address(dev, addr);

    out_8(&mb->iac, 0);
    /* note: setting ADDRCHG clears ENRCV */
    out_8(&mb->maccc, mp->maccc);

    restore_flags(flags);
    return 0;
}

static int mace_open(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    volatile struct dbdma_regs *rd = mp->rx_dma;
    volatile struct dbdma_regs *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int i;
    struct sk_buff *skb;
    unsigned char *data;

    /* reset the chip */
    mace_reset(dev);

    /* initialize list of sk_buffs for receiving and set up recv dma */
    mace_clean_rings(mp);
    memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
    cp = mp->rx_cmds;
    for (i = 0; i < N_RX_RING - 1; ++i) {
	skb = dev_alloc_skb(RX_BUFLEN + 2);
	if (skb == 0) {
	    data = dummy_buf;
	} else {
	    skb_reserve(skb, 2);	/* so IP header lands on 4-byte bdry */
	    data = skb->data;
	}
	mp->rx_bufs[i] = skb;
	st_le16(&cp->req_count, RX_BUFLEN);
	st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
	st_le32(&cp->phy_addr, virt_to_bus(data));
	cp->xfer_status = 0;
	++cp;
    }
    mp->rx_bufs[i] = 0;
    st_le16(&cp->command, DBDMA_STOP);
    mp->rx_fill = i;
    mp->rx_empty = 0;
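    /* Ring invariant: slots rx_empty .. rx_fill-1 (mod N_RX_RING) hold
       posted buffers, and the command at rx_fill is always a DBDMA_STOP,
       so the channel can never run past the buffers we own. */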

    /* Put a branch back to the beginning of the receive command list */
    ++cp;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));

    /* start rx dma */
    out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* put a branch at the end of the tx command list */
    cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));

    /* reset tx dma */
    out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
    out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
    mp->tx_fill = 0;
    mp->tx_empty = 0;
    mp->tx_fullup = 0;
    mp->tx_active = 0;
    mp->tx_bad_runt = 0;

    /* turn it on! */
    out_8(&mb->maccc, mp->maccc);
    /* enable all interrupts except receive interrupts */
    out_8(&mb->imr, RCVINT);

#ifdef MOD_INC_USE_COUNT
    MOD_INC_USE_COUNT;
#endif
    return 0;
}

static inline void mace_clean_rings(struct mace_data *mp)
{
    int i;

    /* free some skb's */
    for (i = 0; i < N_RX_RING; ++i) {
	if (mp->rx_bufs[i] != 0) {
	    dev_kfree_skb(mp->rx_bufs[i]);
	    mp->rx_bufs[i] = 0;
	}
    }
    for (i = mp->tx_empty; i != mp->tx_fill; ) {
	dev_kfree_skb(mp->tx_bufs[i]);
	if (++i >= N_TX_RING)
	    i = 0;
    }
}

static int mace_close(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    volatile struct dbdma_regs *rd = mp->rx_dma;
    volatile struct dbdma_regs *td = mp->tx_dma;

    /* disable rx and tx */
    out_8(&mb->maccc, 0);
    out_8(&mb->imr, 0xff);	/* disable all intrs */

    /* disable rx and tx dma */
    st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

    mace_clean_rings(mp);

#ifdef MOD_DEC_USE_COUNT
    MOD_DEC_USE_COUNT;
#endif

    return 0;
}

static inline void mace_set_timeout(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    unsigned long flags;

    save_flags(flags);
    cli();
    if (mp->timeout_active)
	del_timer(&mp->tx_timeout);
    mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
    mp->tx_timeout.function = mace_tx_timeout;
    mp->tx_timeout.data = (unsigned long) dev;
    add_timer(&mp->tx_timeout);
    mp->timeout_active = 1;
    restore_flags(flags);
}

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp, *np;
    unsigned long flags;
    int fill, next, len;

    /* see if there's a free slot in the tx ring */
    save_flags(flags); cli();
    fill = mp->tx_fill;
    next = fill + 1;
    if (next >= N_TX_RING)
	next = 0;
    if (next == mp->tx_empty) {
	dev->tbusy = 1;
	mp->tx_fullup = 1;
	restore_flags(flags);
	return 1;		/* can't take it at the moment */
    }
    restore_flags(flags);

    /* partially fill in the dma command block */
    len = skb->len;
    if (len > ETH_FRAME_LEN) {
	printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
	len = ETH_FRAME_LEN;
    }
    mp->tx_bufs[fill] = skb;
    cp = mp->tx_cmds + NCMDS_TX * fill;
    st_le16(&cp->req_count, len);
    st_le32(&cp->phy_addr, virt_to_bus(skb->data));

    np = mp->tx_cmds + NCMDS_TX * next;
    out_le16(&np->command, DBDMA_STOP);
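    /* Note that the STOP is written into the *next* slot before the
       current slot is armed below, so the channel always halts on a
       valid command even if it runs ahead of us. */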

    /* poke the tx dma channel */
    save_flags(flags);
    cli();
    mp->tx_fill = next;
    if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, OUTPUT_LAST);
	out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
	++mp->tx_active;
	mace_set_timeout(dev);
    }
    if (++next >= N_TX_RING)
	next = 0;
    if (next == mp->tx_empty)
	dev->tbusy = 1;
    restore_flags(flags);

    return 0;
}

static struct net_device_stats *mace_stats(struct net_device *dev)
{
    struct mace_data *p = (struct mace_data *) dev->priv;

    return &p->stats;
}

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define CRC_POLY	0xedb88320
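
/* 0xedb88320 is the bit-reversed form of the standard Ethernet CRC-32
   polynomial 0x04c11db7; the top 6 bits of each address's CRC (crc >> 26
   below) select one of the 64 bits in the logical address filter. */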

static void mace_set_multicast(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    int i, j, k, b;
    unsigned long crc;

    mp->maccc &= ~PROM;
    if (dev->flags & IFF_PROMISC) {
	mp->maccc |= PROM;
    } else {
	unsigned char multicast_filter[8];
	struct dev_mc_list *dmi = dev->mc_list;

	if (dev->flags & IFF_ALLMULTI) {
	    for (i = 0; i < 8; i++)
		multicast_filter[i] = 0xff;
	} else {
	    for (i = 0; i < 8; i++)
		multicast_filter[i] = 0;
	    for (i = 0; i < dev->mc_count; i++) {
		crc = ~0;
		for (j = 0; j < 6; ++j) {
		    b = dmi->dmi_addr[j];
		    for (k = 0; k < 8; ++k) {
			if ((crc ^ b) & 1)
			    crc = (crc >> 1) ^ CRC_POLY;
			else
			    crc >>= 1;
			b >>= 1;
		    }
		}
		j = crc >> 26;	/* bit number in multicast_filter */
		multicast_filter[j >> 3] |= 1 << (j & 7);
		dmi = dmi->next;
	    }
	}
#if 0
	printk("Multicast filter :");
	for (i = 0; i < 8; i++)
	    printk("%02x ", multicast_filter[i]);
	printk("\n");
#endif
	out_8(&mb->iac, ADDRCHG | LOGADDR);
	while ((in_8(&mb->iac) & ADDRCHG) != 0)
	    ;
	for (i = 0; i < 8; ++i) {
	    out_8(&mb->ladrf, multicast_filter[i]);
	}
    }
    /* reset maccc */
    out_8(&mb->maccc, mp->maccc);
}

static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
{
    volatile struct mace *mb = mp->mace;
    static int mace_babbles, mace_jabbers;

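    /* mpc and rntpc are 8-bit rollover counters; the MPCO and RNTPCO
       interrupt bits flag that a counter wrapped, hence the extra 256. */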
    if (intr & MPCO)
	mp->stats.rx_missed_errors += 256;
    mp->stats.rx_missed_errors += in_8(&mb->mpc);	/* reading clears it */
    if (intr & RNTPCO)
	mp->stats.rx_length_errors += 256;
    mp->stats.rx_length_errors += in_8(&mb->rntpc);	/* reading clears it */
    if (intr & CERR)
	++mp->stats.tx_heartbeat_errors;
    if (intr & BABBLE)
	if (mace_babbles++ < 4)
	    printk(KERN_DEBUG "mace: babbling transmitter\n");
    if (intr & JABBER)
	if (mace_jabbers++ < 4)
	    printk(KERN_DEBUG "mace: jabbering transceiver\n");
}

static void mace_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    volatile struct dbdma_regs *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int intr, fs, i, stat, x;
    int xcount, dstat;
    /* static int mace_last_fs, mace_last_xcount; */

    intr = in_8(&mb->ir);	/* read interrupt register */
    in_8(&mb->xmtrc);		/* get retries */
    mace_handle_misc_intrs(mp, intr);

    i = mp->tx_empty;
    while (in_8(&mb->pr) & XMTSV) {
	del_timer(&mp->tx_timeout);
	mp->timeout_active = 0;
	/*
	 * Clear any interrupt indication associated with this status
	 * word.  This appears to unlatch any error indication from
	 * the DMA controller.
	 */
	intr = in_8(&mb->ir);
	if (intr != 0)
	    mace_handle_misc_intrs(mp, intr);
	if (mp->tx_bad_runt) {
	    fs = in_8(&mb->xmtfs);
	    mp->tx_bad_runt = 0;
	    out_8(&mb->xmtfc, AUTO_PAD_XMIT);
	    continue;
	}
	dstat = ld_le32(&td->status);
	/* stop DMA controller */
	out_le32(&td->control, RUN << 16);
	/*
	 * xcount is the number of complete frames which have been
	 * written to the fifo but for which status has not been read.
	 */
	xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
	if (xcount == 0 || (dstat & DEAD)) {
	    /*
	     * If a packet was aborted before the DMA controller has
	     * finished transferring it, it seems that there are 2 bytes
	     * which are stuck in some buffer somewhere.  These will get
	     * transmitted as soon as we read the frame status (which
	     * reenables the transmit data transfer request).  Turning
	     * off the DMA controller and/or resetting the MACE doesn't
	     * help.  So we disable auto-padding and FCS transmission
	     * so the two bytes will only be a runt packet which should
	     * be ignored by other stations.
	     */
	    out_8(&mb->xmtfc, DXMTFCS);
	}
	fs = in_8(&mb->xmtfs);
	if ((fs & XMTSV) == 0) {
	    printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
		   fs, xcount, dstat);
	    mace_reset(dev);
	    /*
	     * XXX mace likes to hang the machine after a xmtfs error.
	     * This is hard to reproduce, resetting *may* help.
	     */
	}
	cp = mp->tx_cmds + NCMDS_TX * i;
	stat = ld_le16(&cp->xfer_status);
	if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
	    /*
	     * Check whether there were in fact 2 bytes written to
	     * the transmit FIFO.
	     */
	    udelay(1);
	    x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
	    if (x != 0) {
		/* there were two bytes with an end-of-packet indication */
		mp->tx_bad_runt = 1;
		mace_set_timeout(dev);
	    } else {
		/*
		 * Either there weren't the two bytes buffered up, or they
		 * didn't have an end-of-packet indication.
		 * We flush the transmit FIFO just in case (by setting the
		 * XMTFWU bit with the transmitter disabled).
		 */
		out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
		out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
		udelay(1);
		out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
		out_8(&mb->xmtfc, AUTO_PAD_XMIT);
	    }
	}
	/* dma should have finished */
	if (i == mp->tx_fill) {
	    printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
		   fs, xcount, dstat);
	    continue;
	}
	/* Update stats */
	if (fs & (UFLO|LCOL|LCAR|RTRY)) {
	    ++mp->stats.tx_errors;
	    if (fs & LCAR)
		++mp->stats.tx_carrier_errors;
	    if (fs & (UFLO|LCOL|RTRY))
		++mp->stats.tx_aborted_errors;
	} else {
	    mp->stats.tx_bytes += mp->tx_bufs[i]->len;
	    ++mp->stats.tx_packets;
	}
	dev_kfree_skb(mp->tx_bufs[i]);
	--mp->tx_active;
	if (++i >= N_TX_RING)
	    i = 0;
#if 0
	mace_last_fs = fs;
	mace_last_xcount = xcount;
#endif
    }

    if (i != mp->tx_empty) {
	mp->tx_fullup = 0;
	dev->tbusy = 0;
	mark_bh(NET_BH);
    }
    mp->tx_empty = i;
    i += mp->tx_active;
    if (i >= N_TX_RING)
	i -= N_TX_RING;
    if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
	do {
	    /* set up the next one */
	    cp = mp->tx_cmds + NCMDS_TX * i;
	    out_le16(&cp->xfer_status, 0);
	    out_le16(&cp->command, OUTPUT_LAST);
	    ++mp->tx_active;
	    if (++i >= N_TX_RING)
		i = 0;
	} while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
	out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
	mace_set_timeout(dev);
    }
}

static void mace_tx_timeout(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    volatile struct dbdma_regs *td = mp->tx_dma;
    volatile struct dbdma_regs *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp;
    unsigned long flags;
    int i;

    save_flags(flags);
    cli();
    mp->timeout_active = 0;
    if (mp->tx_active == 0 && !mp->tx_bad_runt)
	goto out;

    /* update various counters */
    mace_handle_misc_intrs(mp, in_8(&mb->ir));

    cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

    /* turn off both tx and rx and reset the chip */
    out_8(&mb->maccc, 0);
    printk(KERN_ERR "mace: transmit timeout - resetting\n");
    dbdma_reset(td);
    mace_reset(dev);

    /* restart rx dma */
    cp = bus_to_virt(ld_le32(&rd->cmdptr));
    dbdma_reset(rd);
    out_le16(&cp->xfer_status, 0);
    out_le32(&rd->cmdptr, virt_to_bus(cp));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* fix up the transmit side */
    i = mp->tx_empty;
    mp->tx_active = 0;
    ++mp->stats.tx_errors;
    if (mp->tx_bad_runt) {
	mp->tx_bad_runt = 0;
    } else if (i != mp->tx_fill) {
	dev_kfree_skb(mp->tx_bufs[i]);
	if (++i >= N_TX_RING)
	    i = 0;
	mp->tx_empty = i;
    }
    mp->tx_fullup = 0;
    dev->tbusy = 0;
    mark_bh(NET_BH);
    if (i != mp->tx_fill) {
	cp = mp->tx_cmds + NCMDS_TX * i;
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, OUTPUT_LAST);
	out_le32(&td->cmdptr, virt_to_bus(cp));
	out_le32(&td->control, (RUN << 16) | RUN);
	++mp->tx_active;
	mace_set_timeout(dev);
    }

    /* turn it back on */
    out_8(&mb->imr, RCVINT);
    out_8(&mb->maccc, mp->maccc);

out:
    restore_flags(flags);
}

static void mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
}

static void mace_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp, *np;
    int i, nb, stat, next;
    struct sk_buff *skb;
    unsigned frame_status;
    static int mace_lost_status;
    unsigned char *data;

    for (i = mp->rx_empty; i != mp->rx_fill; ) {
	cp = mp->rx_cmds + i;
	stat = ld_le16(&cp->xfer_status);
	if ((stat & ACTIVE) == 0) {
	    next = i + 1;
	    if (next >= N_RX_RING)
		next = 0;
	    np = mp->rx_cmds + next;
	    if (next != mp->rx_fill
		&& (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
		printk(KERN_DEBUG "mace: lost a status word\n");
		++mace_lost_status;
	    } else
		break;
	}
	nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
	out_le16(&cp->command, DBDMA_STOP);
	/* got a packet, have a look at it */
	skb = mp->rx_bufs[i];
	if (skb == 0) {
	    ++mp->stats.rx_dropped;
	} else if (nb > 8) {
	    data = skb->data;
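	    /* The MACE appends a 4-byte receive status trailer to each
	       frame; its first two bytes (low, high) apparently form the
	       frame status word tested below. */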
	    frame_status = (data[nb-3] << 8) + data[nb-4];
	    if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
		++mp->stats.rx_errors;
		if (frame_status & RS_OFLO)
		    ++mp->stats.rx_over_errors;
		if (frame_status & RS_FRAMERR)
		    ++mp->stats.rx_frame_errors;
		if (frame_status & RS_FCSERR)
		    ++mp->stats.rx_crc_errors;
	    } else {
		/* Mace feature AUTO_STRIP_RCV is on by default, dropping the
		 * FCS on frames with 802.3 headers. This means that Ethernet
		 * frames have 8 extra octets at the end, while 802.3 frames
		 * have only 4. We need to correctly account for this. */
		if (*(unsigned short *)(data+12) < 1536)  /* 802.3 header */
		    nb -= 4;
		else	/* Ethernet header; mace includes FCS */
		    nb -= 8;
		skb_put(skb, nb);
		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		mp->stats.rx_bytes += skb->len;
		++mp->stats.rx_packets;
		netif_rx(skb);
		mp->rx_bufs[i] = 0;
	    }
	} else {
	    ++mp->stats.rx_errors;
	    ++mp->stats.rx_length_errors;
	}

	/* advance to next */
	if (++i >= N_RX_RING)
	    i = 0;
    }
    mp->rx_empty = i;
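
    /* Second pass: walk from rx_fill towards rx_empty, re-arming the
       slots drained above with fresh skbuffs (or dummy_buf as a
       fallback) before waking the DMA channel. */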
    i = mp->rx_fill;
    for (;;) {
	next = i + 1;
	if (next >= N_RX_RING)
	    next = 0;
	if (next == mp->rx_empty)
	    break;
	cp = mp->rx_cmds + i;
	skb = mp->rx_bufs[i];
	if (skb == 0) {
	    skb = dev_alloc_skb(RX_BUFLEN + 2);
	    if (skb != 0) {
		skb_reserve(skb, 2);
		mp->rx_bufs[i] = skb;
	    }
	}
	st_le16(&cp->req_count, RX_BUFLEN);
	data = skb? skb->data: dummy_buf;
	st_le32(&cp->phy_addr, virt_to_bus(data));
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
	if ((ld_le32(&rd->status) & ACTIVE) != 0) {
	    out_le32(&rd->control, (PAUSE << 16) | PAUSE);
	    while ((in_le32(&rd->status) & ACTIVE) != 0)
		;
	}
#endif
	i = next;
    }
    if (i != mp->rx_fill) {
	out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
	mp->rx_fill = i;
    }
}

#ifdef MODULE

#if LINUX_VERSION_CODE > 0x20118
MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
#endif

int init_module(void)
{
    int res;

    if (mace_devs != NULL)
	return -EBUSY;
    res = mace_probe(NULL);
    return res;
}

void cleanup_module(void)
{
    struct mace_data *mp = (struct mace_data *) mace_devs->priv;
    unregister_netdev(mace_devs);

    /* pass the same dev_id that was given to request_irq() */
    free_irq(mace_devs->irq, mace_devs);
    free_irq(mp->tx_dma_intr, mace_devs);
    free_irq(mp->rx_dma_intr, mace_devs);

    kfree(mace_devs);
    mace_devs = NULL;
}

#endif