/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@redhat.com>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 */
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/netdevice.h>
21 #include <linux/etherdevice.h>
22 #include <linux/delay.h>
23 #include <linux/string.h>
24 #include <linux/crc32.h>
25 #include <linux/bitrev.h>
27 #include <asm/pgtable.h>
29 #include <asm/macintosh.h>
30 #include <asm/macints.h>
31 #include <asm/mac_psc.h>
/* Rx ring entries are 0x0800 bytes apiece (see mace_dma_intr); round the
 * whole ring up to full pages for __get_free_pages().  N_RX_RING itself
 * is defined above, outside this extract. */
#define N_RX_PAGES ((N_RX_RING * 0x0800 + PAGE_SIZE - 1) / PAGE_SIZE)

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)	/* memory-mapped MACE registers */
#define MACE_PROM	(void *)(0x50F08001)	/* hardware-address PROM */
/* NOTE(review): these declarations appear to be the members of the
 * per-device private area `struct mace_data` -- the enclosing
 * "struct mace_data {" line seems to have been lost in extraction;
 * confirm against the full file. */
volatile struct mace *mace;		/* the memory-mapped chip registers */
volatile unsigned char *tx_ring;	/* Tx DMA buffer, kernel virtual */
volatile unsigned char *tx_ring_phys;	/* Tx DMA buffer, bus address (virt_to_bus) */
volatile unsigned char *rx_ring;	/* Rx DMA ring, kernel virtual */
volatile unsigned char *rx_ring_phys;	/* Rx DMA ring, bus address (virt_to_bus) */

struct net_device_stats stats;		/* interface statistics */

int tx_slot,		/* PSC set offset used for the next transmit */
    tx_sloti,		/* PSC set offset of the oldest in-flight transmit */
    tx_count;		/* free Tx slots (initialized to N_TX_RING) */
/* And frame continues.. */

/* Size of the private area handed to alloc_etherdev() in mace_probe() */
#define PRIV_BYTES	sizeof(struct mace_data)

extern void psc_debug_dump(void);
/* Forward declarations for the net_device methods wired up in mace_probe() */
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *mace_stats(struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev);
/*
 * Load a receive DMA channel with a base address and ring length.
 * 'set' selects which of the two PSC command sets (0x00 or 0x10) is
 * being programmed; callers pass PSC_SET0/PSC_SET1-style offsets.
 */

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;

	/* magic PSC command values -- TODO confirm against PSC docs:
	 * 0x0100 appears to pause/ack the set, 0x9800 to (re)arm it */
	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
}
/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;		/* remember current MAC config */

	/* receiver off while the DMA channels are reprogrammed */
	mace->maccc = maccc & ~ENRCV;

	/* reload ring set 0 */
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	/* reload ring set 1 */
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	/* NOTE(review): the statements that restore MACCC (re-enabling
	 * the receiver) and reset the software ring state appear to have
	 * been lost from this extract -- confirm against the full file. */

	/* kick both command sets back into action */
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}
/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mace = mp->mace;
	/* NOTE(review): the declaration/initialization of 'maccc' and the
	 * final restore of MACCC appear lost in extraction; code below is
	 * kept exactly as found. */

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	/* transmitter off while the channel is reset */
	mace->maccc = maccc & ~ENXMT;

	/* both Tx command sets idle again, full ring available */
	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
}
/*
 * Turn off both PSC Ethernet DMA channels (called from mace_close()
 * to "disable rx and tx dma").  The control/command values are the
 * PSC's magic stop/cancel codes -- TODO confirm against PSC docs.
 */
static void mace_dma_off(struct net_device *dev)
{
	/* read (Rx) channel: halt it and cancel both command sets */
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	/* write (Tx) channel: same sequence */
	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}
/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

struct net_device *mace_probe(int unit)
{
	struct mace_data *mp;
	struct net_device *dev;
	unsigned char checksum = 0;
	static int found = 0;
	/* NOTE(review): this extract appears to have lost several original
	 * lines (declarations of 'addr', 'j', 'err'; the NULL check on
	 * 'dev'; parts of the PROM loops and error paths; the trailing
	 * return).  The surviving statements are kept exactly as found. */

	/* only one MACE can exist, and only on AV models */
	if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
		return ERR_PTR(-ENODEV);

	found = 1;	/* prevent 'finding' one on every device probe */

	dev = alloc_etherdev(PRIV_BYTES);
		/* presumably guarded by a (missing) NULL check on dev */
		return ERR_PTR(-ENOMEM);

	sprintf(dev->name, "eth%d", unit);

	mp = (struct mace_data *) dev->priv;
	dev->base_addr = (u32)MACE_BASE;
	mp->mace = (volatile struct mace *) MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar apple brain damage
	 * the bytes are spaced out in a strange boundary and the
	 * bits are bit-reversed.
	 */

	addr = (void *)MACE_PROM;

	/* address bytes live every 16 bytes in the PROM, bit-reversed */
	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		dev->dev_addr[j] = v;
	}
	/* the remaining PROM bytes only feed the XOR checksum */
	checksum ^= bitrev8(addr[j<<4]);

	/* all 8 PROM bytes must XOR to 0xFF for a valid address */
	if (checksum != 0xFF) {
		return ERR_PTR(-ENODEV);
	}

	memset(&mp->stats, 0, sizeof(mp->stats));

	/* hook up the driver entry points (pre-netdev_ops API) */
	dev->open		= mace_open;
	dev->stop		= mace_close;
	dev->hard_start_xmit	= mace_xmit_start;
	dev->tx_timeout		= mace_tx_timeout;
	dev->watchdog_timeo	= TX_TIMEOUT;
	dev->get_stats		= mace_stats;
	dev->set_multicast_list	= mace_set_multicast;
	dev->set_mac_address	= mace_set_address;

	printk(KERN_INFO "%s: 68K MACE, hardware address %.2X", dev->name, dev->dev_addr[0]);
	for (j = 1 ; j < 6 ; j++) printk(":%.2X", dev->dev_addr[j]);

	err = register_netdev(dev);
}
/*
 * Load the address on a mace controller.
 */

static int mace_set_address(struct net_device *dev, void *addr)
{
	unsigned char *p = addr;
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	/* NOTE(review): local declarations ('i', 'flags'), the save/restore
	 * of MACCC and the 'return 0;' appear lost in extraction. */

	/* program the chip with interrupts off */
	local_irq_save(flags);

	/* load up the hardware address */
	mb->iac = ADDRCHG | PHYADDR;		/* enter address-change mode */
	while ((mb->iac & ADDRCHG) != 0);	/* busy-wait until chip is ready */

	for (i = 0; i < 6; ++i) {
		/* successive PADR writes presumably step through the six
		 * address bytes -- confirm against the MACE datasheet */
		mb->padr = dev->dev_addr[i] = p[i];
	}

	local_irq_restore(flags);
}
/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	/* NOTE(review): this extract appears to have lost several original
	 * lines (local declarations such as 'i', the chip software-reset
	 * sequence and its timeout loop, the error-path returns, the
	 * multicast-filter-clearing store, the IMR setup and the final
	 * 'return 0;').  Surviving statements kept exactly as found. */

	if (mb->biucc & SWRST) {
		/* reset did not clear -- give up */
		printk(KERN_ERR "%s: software reset failed!!\n", dev->name);
	}

	/* basic chip setup: burst sizes, padding, port selection */
	mb->biucc = XMTSP_64;
	mb->fifocc = XMTFW_16 | RCVFW_64 | XMTFWU | RCVFWU | XMTBRST | RCVBRST;
	mb->xmtfc = AUTO_PAD_XMIT;
	mb->plscc = PORTSEL_AUI;
	/* mb->utr = RTRD; */

	/* chip interrupt, then the separate PSC DMA interrupt */
	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
	}

	/* Allocate the DMA ring buffers */

	mp->rx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, N_RX_PAGES);
	mp->tx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, 0);

	if (mp->tx_ring==NULL || mp->rx_ring==NULL) {
		/* undo whichever allocation succeeded, plus both IRQs */
		if (mp->rx_ring) free_pages((u32) mp->rx_ring, N_RX_PAGES);
		if (mp->tx_ring) free_pages((u32) mp->tx_ring, 0);
		free_irq(dev->irq, dev);
		free_irq(mp->dma_intr, dev);
		printk(KERN_ERR "%s: unable to allocate DMA buffers\n", dev->name);
	}

	mp->rx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->rx_ring);
	mp->tx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->tx_ring);

	/* We want the Rx buffer to be uncached and the Tx buffer to be writethrough */

	kernel_set_cachemode((void *)mp->rx_ring, N_RX_PAGES * PAGE_SIZE, IOMAP_NOCACHE_NONSER);
	kernel_set_cachemode((void *)mp->tx_ring, PAGE_SIZE, IOMAP_WRITETHROUGH);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	/* load up the hardware address */

	mb->iac = ADDRCHG | PHYADDR;

	while ((mb->iac & ADDRCHG) != 0);	/* wait for chip */

	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i];

	/* clear the multicast filter */
	mb->iac = ADDRCHG | LOGADDR;

	while ((mb->iac & ADDRCHG) != 0);	/* wait for chip */

	for (i = 0; i < 8; ++i)
		/* NOTE(review): the LADRF-clearing store appears elided here */

	mb->plscc = PORTSEL_GPSI + ENPLSIO;

	/* enable transmit and receive */
	mb->maccc = ENXMT | ENRCV;

	/* bring both DMA directions to a known state */
	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);
}
/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx */
	mb->imr = 0xFF;		/* disable all irqs */
	mace_dma_off(dev);	/* disable rx and tx dma */

	/* release both interrupt lines taken in mace_open() */
	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	/* free the DMA ring buffers allocated in mace_open() */
	free_pages((u32) mp->rx_ring, N_RX_PAGES);
	free_pages((u32) mp->tx_ring, 0);
	/* NOTE(review): the closing 'return 0;' appears lost in extraction */
}
/*
 * Transmit a frame: copy it into the dedicated Tx buffer (to satisfy
 * alignment/caching constraints) and fire off the PSC write-channel DMA.
 */
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	/* NOTE(review): the tx_count test that guards netif_stop_queue(),
	 * the tx_slot toggle, the skb free and the return value appear
	 * lost in extraction; code below kept exactly as found. */

	/* Stop the queue if the buffer is full */
	netif_stop_queue(dev);

	mp->stats.tx_packets++;
	mp->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */

	memcpy((void *) mp->tx_ring, skb->data, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);
}
/* Return the accumulated interface statistics (get_stats hook). */
static struct net_device_stats *mace_stats(struct net_device *dev)
{
	struct mace_data *p = (struct mace_data *) dev->priv;
	/* NOTE(review): the 'return &p->stats;' line appears lost in
	 * extraction. */
}
/* Rebuild the chip's receive filter from dev->flags and the multicast
 * list (set_multicast_list hook). */
static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	/* NOTE(review): several original lines appear lost in extraction:
	 * local declarations (i, j, crc), the promiscuous-branch body,
	 * the 'else' keywords pairing the branches below, the advance of
	 * 'dmi' in the hash loop, and the MACCC save/restore. Surviving
	 * statements kept exactly as found. */

	if (dev->flags & IFF_PROMISC) {
	}
		unsigned char multicast_filter[8];
		struct dev_mc_list *dmi = dev->mc_list;

		if (dev->flags & IFF_ALLMULTI) {
			/* accept every multicast group */
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		}
			/* build the 64-bit hash filter from the mc list:
			 * top 6 bits of the little-endian CRC pick the bit */
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			for (i = 0; i < dev->mc_count; i++) {
				crc = ether_crc_le(6, dmi->dmi_addr);
				j = crc >> 26;	/* bit number in multicast_filter */
				multicast_filter[j >> 3] |= 1 << (j & 7);
			}

		/* latch the logical (multicast) address filter into the chip */
		mb->iac = ADDRCHG | LOGADDR;
		while (mb->iac & ADDRCHG);	/* wait for chip */

		for (i = 0; i < 8; ++i) {
			mb->ladrf = multicast_filter[i];
		}
}
/*
 * Miscellaneous interrupts are handled here. We may end up
 * having to bash the chip on the head for bad errors
 */

static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
{
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;	/* rate-limit counters */
	/* NOTE(review): the 'if (intr & ...)' guards that originally
	 * selected each statement below appear lost in extraction;
	 * statements kept exactly as found. */

	/* missed-packet counter: +256 presumably on overflow irq */
	mp->stats.rx_missed_errors += 256;
	mp->stats.rx_missed_errors += mb->mpc;		/* reading clears it */

	/* runt-packet counter: same pattern */
	mp->stats.rx_length_errors += 256;
	mp->stats.rx_length_errors += mb->rntpc;	/* reading clears it */

	++mp->stats.tx_heartbeat_errors;

	/* only report the first few occurrences of each condition */
	if (mace_babbles++ < 4) {
		printk(KERN_DEBUG "mace: babbling transmitter\n");
	}
	if (mace_jabbers++ < 4) {
		printk(KERN_DEBUG "mace: jabbering transceiver\n");
	}
}
/*
 * A transmit error has occurred. (We kick the transmit side from
 * the DMA completion)
 */

static void mace_xmit_error(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	/* NOTE(review): the reads of the transmit status registers and
	 * the conditions selecting the two cases below appear lost in
	 * extraction; statements kept exactly as found. */

	/* FIFO underran -- log it, count it, and reset the Tx DMA */
	printk("%s: DMA underrun.\n", dev->name);
	mp->stats.tx_errors++;
	mp->stats.tx_fifo_errors++;
	mace_txdma_reset(dev);

	/* collision case */
	mp->stats.collisions++;
}
/*
 * A receive interrupt occurred.
 * Currently an empty stub -- all actual receive handling happens in
 * mace_dma_intr()/mace_dma_rx_frame(); only the commented-out locals
 * remain here.
 */

static void mace_recv_interrupt(struct net_device *dev)
{
	/* struct mace_data *mp = (struct mace_data *) dev->priv; */
//	volatile struct mace *mb = mp->mace;
}
/*
 * Process the chip interrupt
 */

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	/* NOTE(review): the read of the chip interrupt register into 'ir',
	 * the bit tests selecting the handlers below, and the final
	 * 'return IRQ_HANDLED;' appear lost in extraction. */

	/* counters, babble/jabber reporting */
	mace_handle_misc_intrs(mp, ir);

	/* transmit-side error recovery */
	mace_xmit_error(dev);

	/* receive-side handling (currently a stub) */
	mace_recv_interrupt(dev);
}
/* Watchdog hook registered in mace_probe() (dev->tx_timeout).
 * Currently an empty stub -- only the commented-out locals remain. */
static void mace_tx_timeout(struct net_device *dev)
{
	/* struct mace_data *mp = (struct mace_data *) dev->priv; */
//	volatile struct mace *mb = mp->mace;
}
/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	/* NOTE(review): the 'skb' declaration, the NULL check after
	 * dev_alloc_skb(), the skb_reserve() for IP alignment implied by
	 * the '+2', the netif_rx() hand-off and some early returns appear
	 * lost in extraction; statements kept exactly as found. */

	/* bump error counters according to the frame's status bits */
	if (mf->status & RS_OFLO) {
		printk("%s: fifo overflow.\n", dev->name);
		mp->stats.rx_errors++;
		mp->stats.rx_fifo_errors++;
	}
	if (mf->status&(RS_CLSN|RS_FRAMERR|RS_FCSERR))
		mp->stats.rx_errors++;

	if (mf->status&RS_CLSN) {
		mp->stats.collisions++;
	}
	if (mf->status&RS_FRAMERR) {
		mp->stats.rx_frame_errors++;
	}
	if (mf->status&RS_FCSERR) {
		mp->stats.rx_crc_errors++;
	}

	/* +2 presumably leaves room to align the IP header -- confirm */
	skb = dev_alloc_skb(mf->len+2);
		mp->stats.rx_dropped++;		/* allocation failed */

	/* copy the frame out of the DMA ring into the skb */
	memcpy(skb_put(skb, mf->len), mf->data, mf->len);

	skb->protocol = eth_type_trans(skb, dev);

	dev->last_rx = jiffies;
	mp->stats.rx_packets++;
	mp->stats.rx_bytes += mf->len;
}
633 * The PSC has passed us a DMA interrupt event.
636 static irqreturn_t
mace_dma_intr(int irq
, void *dev_id
)
638 struct net_device
*dev
= (struct net_device
*) dev_id
;
639 struct mace_data
*mp
= (struct mace_data
*) dev
->priv
;
644 /* Not sure what this does */
646 while ((baka
= psc_read_long(PSC_MYSTERY
)) != psc_read_long(PSC_MYSTERY
));
647 if (!(baka
& 0x60000000)) return IRQ_NONE
;
650 * Process the read queue
653 status
= psc_read_word(PSC_ENETRD_CTL
);
655 if (status
& 0x2000) {
656 mace_rxdma_reset(dev
);
657 } else if (status
& 0x0100) {
658 psc_write_word(PSC_ENETRD_CMD
+ mp
->rx_slot
, 0x1100);
660 left
= psc_read_long(PSC_ENETRD_LEN
+ mp
->rx_slot
);
661 head
= N_RX_RING
- left
;
663 /* Loop through the ring buffer and process new packages */
665 while (mp
->rx_tail
< head
) {
666 mace_dma_rx_frame(dev
, (struct mace_frame
*) (mp
->rx_ring
+ (mp
->rx_tail
* 0x0800)));
670 /* If we're out of buffers in this ring then switch to */
671 /* the other set, otherwise just reactivate this one. */
674 mace_load_rxdma_base(dev
, mp
->rx_slot
);
677 psc_write_word(PSC_ENETRD_CMD
+ mp
->rx_slot
, 0x9800);
682 * Process the write queue
685 status
= psc_read_word(PSC_ENETWR_CTL
);
687 if (status
& 0x2000) {
688 mace_txdma_reset(dev
);
689 } else if (status
& 0x0100) {
690 psc_write_word(PSC_ENETWR_CMD
+ mp
->tx_sloti
, 0x0100);
691 mp
->tx_sloti
^= 0x10;
693 netif_wake_queue(dev
);
/* This driver is distributed under the GPL (see the file header). */
MODULE_LICENSE("GPL");