/*
 * Amiga Linux/68k A2065 Ethernet Driver
 *
 * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
 *
 * Fixes and tips by:
 *	- Janos Farkas (CHEXUM@sparta.banki.hu)
 *	- Jes Degn Soerensen (jds@kom.auc.dk)
 *	- Matt Domsch (Matt_Domsch@dell.com)
 *
 * ----------------------------------------------------------------------------
 *
 * This program is based on
 *
 *	ariadne.?:	Amiga Linux/68k Ariadne Ethernet Driver
 *			(C) Copyright 1995 by Geert Uytterhoeven,
 *					     Peter De Schrijver
 *
 *	lance.c:	An AMD LANCE ethernet driver for linux.
 *			Written 1993-94 by Donald Becker.
 *
 *	Am79C960:	PCnet(tm)-ISA Single-Chip Ethernet Controller
 *			Advanced Micro Devices
 *			Publication #16907, Rev. B, Amendment/0, May 1994
 *
 * ----------------------------------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 *
 * ----------------------------------------------------------------------------
 *
 * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
 *
 *	- an Am7990 Local Area Network Controller for Ethernet (LANCE) with
 *	  both 10BASE-2 (thin coax) and AUI (DB-15) connectors
 */

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/zorro.h>
#include <linux/bitops.h>

#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>

#include "a2065.h"

/*
 *		Transmit/Receive Ring Definitions
 */

#define LANCE_LOG_TX_BUFFERS	(2)
#define LANCE_LOG_RX_BUFFERS	(4)

#define TX_RING_SIZE		(1<<LANCE_LOG_TX_BUFFERS)
#define RX_RING_SIZE		(1<<LANCE_LOG_RX_BUFFERS)

#define TX_RING_MOD_MASK	(TX_RING_SIZE-1)
#define RX_RING_MOD_MASK	(RX_RING_SIZE-1)

#define PKT_BUF_SIZE		(1544)
#define RX_BUFF_SIZE		PKT_BUF_SIZE
#define TX_BUFF_SIZE		PKT_BUF_SIZE

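/* Each ring slot gets its own PKT_BUF_SIZE buffer; 1544 bytes comfortably
 * holds a maximum-length Ethernet frame (1518 bytes including the FCS).
 */
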
/*
 *		Layout of the Lance's RAM Buffer
 */

struct lance_init_block {
	unsigned short mode;		/* Pre-set mode (reg. 15) */
	unsigned char phys_addr[6];	/* Physical ethernet address */
	unsigned filter[2];		/* Multicast filter. */

	/* Receive and transmit ring base, along with extra bits. */
	unsigned short rx_ptr;		/* receive descriptor addr */
	unsigned short rx_len;		/* receive len and high addr */
	unsigned short tx_ptr;		/* transmit descriptor addr */
	unsigned short tx_len;		/* transmit len and high addr */

	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];

	char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
	char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
};

/*
 *		Private Device Data
 */

struct lance_private {
	char *name;
	volatile struct lance_regs *ll;
	volatile struct lance_init_block *init_block;	     /* Hosts view */
	volatile struct lance_init_block *lance_init_block; /* Lance view */

	int rx_new, tx_new;
	int rx_old, tx_old;

	int lance_log_rx_bufs, lance_log_tx_bufs;
	int rx_ring_mod_mask, tx_ring_mod_mask;

	int tpe;		/* cable-selection is TPE */
	int auto_select;	/* cable-selection by carrier */
	unsigned short busmaster_regval;

#ifdef CONFIG_SUNLANCE
	struct Linux_SBus_DMA *ledma;	/* if set this points to ledma and arch=4m */
	int burst_sizes;		/* ledma SBus burst sizes */
#endif
	struct timer_list multicast_timer;
};

#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
			lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
			lp->tx_old - lp->tx_new-1)

#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)

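/* The Am7990 only drives 24 address bits, so LANCE_ADDR() masks an address
 * down to its low 24 bits before it is loaded into CSR1/CSR2 or written into
 * a ring descriptor.
 */
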
/* Load the CSR registers */
static void load_csrs (struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr;

	leptr = LANCE_ADDR (aib);

	ll->rap = LE_CSR1;
	ll->rdp = (leptr & 0xFFFF);
	ll->rap = LE_CSR2;
	ll->rdp = leptr >> 16;
	ll->rap = LE_CSR3;
	ll->rdp = lp->busmaster_regval;

	/* Point back to csr0 */
	ll->rap = LE_CSR0;
}

#define ZERO 0

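/* The if (ZERO) printk() calls below are debugging aids; with ZERO defined
 * as 0 the compiler drops them, but they can be re-enabled by changing the
 * definition above.
 */
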
/* Setup the Lance Rx and Tx rings */
static void lance_init_ring (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
	int leptr;
	int i;

	aib = lp->lance_init_block;

	/* Lock out other processes while setting up hardware */
	netif_stop_queue(dev);
	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = 0;

	/* Copy the ethernet address to the lance init block
	 * Note that on the sparc you need to swap the ethernet address.
	 */
	ib->phys_addr [0] = dev->dev_addr [1];
	ib->phys_addr [1] = dev->dev_addr [0];
	ib->phys_addr [2] = dev->dev_addr [3];
	ib->phys_addr [3] = dev->dev_addr [2];
	ib->phys_addr [4] = dev->dev_addr [5];
	ib->phys_addr [5] = dev->dev_addr [4];

	if (ZERO)
		printk(KERN_DEBUG "TX rings:\n");

	/* Setup the Tx ring entries */
	for (i = 0; i <= (1<<lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring [i].tmd0      = leptr;
		ib->btx_ring [i].tmd1_hadr = leptr >> 16;
		ib->btx_ring [i].tmd1_bits = 0;
		ib->btx_ring [i].length    = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring [i].misc      = 0;
		if (i < 3 && ZERO)
			printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	if (ZERO)
		printk(KERN_DEBUG "RX rings:\n");
	for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring [i].rmd0      = leptr;
		ib->brx_ring [i].rmd1_hadr = leptr >> 16;
		ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
		ib->brx_ring [i].length    = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring [i].mblength  = 0;
		if (i < 3 && ZERO)
			printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	if (ZERO)
		printk(KERN_DEBUG "RX ptr: %8.8x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	if (ZERO)
		printk(KERN_DEBUG "TX ptr: %8.8x\n", leptr);

	/* Clear the multicast filter */
	ib->filter [0] = 0;
	ib->filter [1] = 0;
}

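/* Tell the LANCE to read the init block (CSR0 INIT), wait for it to report
 * IDON (or an error), then enable interrupts and start the chip.
 */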
static int init_restart_lance (struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	int i;

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_INIT;

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
		printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
		       i, ll->rdp);
		return -EIO;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	ll->rdp = LE_C0_IDON;
	ll->rdp = LE_C0_INEA | LE_C0_STRT;

	return 0;
}

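/* Receive path: walk the Rx ring from rx_new, copy each completed frame into
 * a freshly allocated skb, hand it to the stack with netif_rx(), and give the
 * descriptor back to the LANCE by setting LE_R1_OWN again.
 */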
static int lance_rx (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;

#ifdef TEST_HITS
	int i;
	printk(KERN_DEBUG "[");
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (i == lp->rx_new)
			printk ("%s",
				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
		else
			printk ("%s",
				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
	}
	printk ("]\n");
#endif

	ll->rdp = LE_C0_RINT|LE_C0_INEA;
	for (rd = &ib->brx_ring [lp->rx_new];
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring [lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
			continue;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP) dev->stats.rx_errors++;
		} else {
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = dev_alloc_skb (len+2);

			if (!skb) {
				printk(KERN_WARNING "%s: Memory squeeze, "
				       "deferring packet.\n", dev->name);
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve (skb, 2);		/* 16 byte align */
			skb_put (skb, len);		/* make room */
			skb_copy_to_linear_data(skb,
				(unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
				len);
			skb->protocol = eth_type_trans (skb, dev);
			netif_rx (skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}

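/* Transmit completion: reclaim descriptors the LANCE has finished with,
 * update the error/collision counters, and on serious errors (carrier loss
 * with auto_select, buffer error or FIFO underflow) reset and restart the
 * chip.
 */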
static int lance_tx (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

	/* csr0 is 2f3 */
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring [i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)  dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					printk(KERN_ERR "%s: Carrier Lost, "
					       "trying %s\n", dev->name,
					       lp->tpe?"TPE":"AUI");
					/* Stop the lance */
					ll->rap = LE_CSR0;
					ll->rdp = LE_C0_STOP;
					lance_init_ring (dev);
					load_csrs (lp);
					init_restart_lance (lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off the transmitter */
			/* Restart the adapter */
			if (status & (LE_T3_BUF|LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, "
				       "restarting\n", dev->name);
				/* Stop the lance */
				ll->rap = LE_CSR0;
				ll->rdp = LE_C0_STOP;
				lance_init_ring (dev);
				load_csrs (lp);
				init_restart_lance (lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	return 0;
}

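/* Shared PORTS interrupt handler: read CSR0, bail out if the LANCE did not
 * raise the interrupt, acknowledge the pending sources, then service Rx/Tx
 * and the error bits before re-enabling interrupts.
 */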
static irqreturn_t lance_interrupt (int irq, void *dev_id)
{
	struct net_device *dev;
	struct lance_private *lp;
	volatile struct lance_regs *ll;
	int csr0;

	dev = (struct net_device *) dev_id;

	lp = netdev_priv(dev);
	ll = lp->ll;

	ll->rap = LE_CSR0;		/* LANCE Controller Status */
	csr0 = ll->rdp;

	if (!(csr0 & LE_C0_INTR))	/* Check if any interrupt has */
		return IRQ_NONE;	/* been generated by the Lance. */

	/* Acknowledge all the interrupt sources ASAP */
	ll->rdp = csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|
			   LE_C0_INIT);

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition */
		ll->rdp = LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA;
	}

	if (csr0 & LE_C0_RINT)
		lance_rx (dev);

	if (csr0 & LE_C0_TINT)
		lance_tx (dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;		/* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;		/* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		printk(KERN_ERR "%s: Bus master arbitration failure, status "
		       "%4.4x.\n", dev->name, csr0);
		/* Restart the chip. */
		ll->rdp = LE_C0_STRT;
	}

	if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0)
		netif_wake_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|
		  LE_C0_IDON|LE_C0_INEA;
	return IRQ_HANDLED;
}

static int lance_open (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int ret;

	/* Stop the Lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	/* Install the Interrupt handler */
	ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	load_csrs (lp);
	lance_init_ring (dev);

	netif_start_queue(dev);

	return init_restart_lance (lp);
}

static int lance_close (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	/* Stop the card */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);
	return 0;
}

static inline int lance_reset (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* Stop the lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	load_csrs (lp);

	lance_init_ring (dev);
	dev->trans_start = jiffies;
	netif_start_queue(dev);

	status = init_restart_lance (lp);
#ifdef DEBUG_DRIVER
	printk(KERN_DEBUG "Lance restart=%d\n", status);
#endif
	return status;
}

static void lance_tx_timeout(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
	       dev->name, ll->rdp);
	lance_reset(dev);
	netif_wake_queue(dev);
}

static netdev_tx_t lance_start_xmit (struct sk_buff *skb,
				     struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen;
	int status = NETDEV_TX_OK;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;
	skblen = max_t(unsigned, skb->len, ETH_ZLEN);

	local_irq_save(flags);

	if (!TX_BUFFS_AVAIL){
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

#ifdef DEBUG_DRIVER
	/* dump the packet */
	print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
		       16, 1, skb->data, 64, true);
#endif
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring [entry].length = (-skblen) | 0xf000;
	ib->btx_ring [entry].misc = 0;

	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf [entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
	lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
	dev->stats.tx_bytes += skblen;

	if (TX_BUFFS_AVAIL <= 0)
		netif_stop_queue(dev);

	/* Kick the lance: transmit now */
	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
	dev->trans_start = jiffies;
	dev_kfree_skb (skb);

	local_irq_restore(flags);

	return status;
}

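/* The LANCE filters multicast through a 64-bit logical address filter: the
 * top six bits of the little-endian CRC-32 of each address select one bit in
 * ib->filter, which lance_load_multicast() below sets for every address on
 * the device's multicast list.
 */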
/* taken from the depca driver */
static void lance_load_multicast (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct dev_mc_list *dmi = dev->mc_list;
	char *addrs;
	int i;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI){
		ib->filter [0] = 0xffffffff;
		ib->filter [1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter [0] = 0;
	ib->filter [1] = 0;

	/* Add addresses */
	for (i = 0; i < dev->mc_count; i++){
		addrs = dmi->dmi_addr;
		dmi = dmi->next;

		/* multicast address? */
		if (!(*addrs & 1))
			continue;

		crc = ether_crc_le(6, addrs);
		crc = crc >> 26;
		mcast_table [crc >> 4] |= 1 << (crc & 0xf);
	}
	return;
}

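/* Changing the filter means rewriting the init block, so the chip has to be
 * stopped and re-initialized.  If transmits are still pending, retry via
 * multicast_timer a few jiffies later instead of dropping them.
 */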
static void lance_set_multicast (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;

	if (!netif_running(dev))
		return;

	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;
	lance_init_ring (dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast (dev);
	}
	load_csrs (lp);
	init_restart_lance (lp);
	netif_wake_queue(dev);
}

static int __devinit a2065_init_one(struct zorro_dev *z,
				    const struct zorro_device_id *ent);
static void __devexit a2065_remove_one(struct zorro_dev *z);


static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
	{ ZORRO_PROD_CBM_A2065_1 },
	{ ZORRO_PROD_CBM_A2065_2 },
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};

static struct zorro_driver a2065_driver = {
	.name		= "a2065",
	.id_table	= a2065_zorro_tbl,
	.probe		= a2065_init_one,
	.remove		= __devexit_p(a2065_remove_one),
};

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_multicast_list	= lance_set_multicast,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
};

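/* Probe one Zorro board: claim the register and RAM regions, derive the MAC
 * address from the manufacturer prefix plus the board's Zorro serial number,
 * and register the net device.
 */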
static int __devinit a2065_init_one(struct zorro_dev *z,
				    const struct zorro_device_id *ent)
{
	struct net_device *dev;
	struct lance_private *priv;
	unsigned long board, base_addr, mem_start;
	struct resource *r1, *r2;
	int err;

	board = z->resource.start;
	base_addr = board+A2065_LANCE;
	mem_start = board+A2065_RAM;

	r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
				"Am7990");
	if (!r1)
		return -EBUSY;
	r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
	if (!r2) {
		release_resource(r1);
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct lance_private));
	if (dev == NULL) {
		release_resource(r1);
		release_resource(r2);
		return -ENOMEM;
	}

	priv = netdev_priv(dev);

	r1->name = dev->name;
	r2->name = dev->name;

	dev->dev_addr[0] = 0x00;
	if (z->id != ZORRO_PROD_AMERISTAR_A2065) {	/* Commodore */
		dev->dev_addr[1] = 0x80;
		dev->dev_addr[2] = 0x10;
	} else {					/* Ameristar */
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x9f;
	}
	dev->dev_addr[3] = (z->rom.er_SerialNumber>>16) & 0xff;
	dev->dev_addr[4] = (z->rom.er_SerialNumber>>8) & 0xff;
	dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
	dev->base_addr = ZTWO_VADDR(base_addr);
	dev->mem_start = ZTWO_VADDR(mem_start);
	dev->mem_end = dev->mem_start+A2065_RAM_SIZE;

	priv->ll = (volatile struct lance_regs *)dev->base_addr;
	priv->init_block = (struct lance_init_block *)dev->mem_start;
	priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
	priv->auto_select = 0;
	priv->busmaster_regval = LE_C3_BSWP;

	priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
	priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
	priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
	priv->tx_ring_mod_mask = TX_RING_MOD_MASK;

	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = 5*HZ;
	dev->dma = 0;

	init_timer(&priv->multicast_timer);
	priv->multicast_timer.data = (unsigned long) dev;
	priv->multicast_timer.function =
		(void (*)(unsigned long)) &lance_set_multicast;

	err = register_netdev(dev);
	if (err) {
		release_resource(r1);
		release_resource(r2);
		free_netdev(dev);
		return err;
	}
	zorro_set_drvdata(z, dev);

	printk(KERN_INFO "%s: A2065 at 0x%08lx, Ethernet Address "
	       "%pM\n", dev->name, board, dev->dev_addr);

	return 0;
}

static void __devexit a2065_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	release_mem_region(ZTWO_PADDR(dev->base_addr),
			   sizeof(struct lance_regs));
	release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
	free_netdev(dev);
}

static int __init a2065_init_module(void)
{
	return zorro_register_driver(&a2065_driver);
}

static void __exit a2065_cleanup_module(void)
{
	zorro_unregister_driver(&a2065_driver);
}

module_init(a2065_init_module);
module_exit(a2065_cleanup_module);

MODULE_LICENSE("GPL");