/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
 */

static char *version =
	"sunqe.c:v2.0 9/9/99 David S. Miller (davem@redhat.com)\n";
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "sunqe.h"
static struct sunqec *root_qec_dev = NULL;
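
/* root_qec_dev heads a simple list of all probed cards: one sunqec is
 * allocated per QEC SBUS card, and each of its four QE channels gets
 * its own sunqe and net_device.
 */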

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(struct qe_globreg *gregs)
{
	int tries = QEC_RESET_TRIES;

	gregs->ctrl = GLOB_CTRL_RESET;
	while(--tries) {
		if(gregs->ctrl & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if(tries)
		return 0;
	printk("QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES 200

static inline int qe_stop(struct sunqe *qep)
{
	struct qe_creg *cregs = qep->qcregs;
	struct qe_mregs *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	mregs->bconfig = MREGS_BCONFIG_RESET;
	tries = MACE_RESET_RETRIES;
	while(--tries) {
		if(mregs->bconfig & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if(!tries) {
		printk("QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	cregs->ctrl = CREG_CTRL_RESET;
	tries = QE_RESET_RETRIES;
	while(--tries) {
		if(cregs->ctrl & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if(!tries) {
		printk("QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}
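
/* The qe_init_block lives in DVMA space and holds the RX and TX
 * descriptor rings.  Setting RXD_OWN in rx_flags hands a descriptor
 * (and its fixed receive buffer) to the chip.
 */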
static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for(i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}
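
/* Program one QEC channel and its MACE for operation: point the QEC at
 * the descriptor rings, carve this channel's slice of QEC local memory
 * into RX/TX FIFOs, then set up the MACE PHY, station address and
 * address filter.
 */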
static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	struct qe_creg *cregs = qep->qcregs;
	struct qe_mregs *mregs = qep->mregs;
	struct qe_globreg *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	volatile unsigned char garbage;
	int i;

	/* Setup initial rx/tx init block pointers. */
	cregs->rxds = qep->qblock_dvma + qib_offset(qe_rxd, 0);
	cregs->txds = qep->qblock_dvma + qib_offset(qe_txd, 0);

	/* Enable/mask the various irq's. */
	cregs->mmask = CREG_MMASK_RXCOLL;

	/* Setup the FIFO pointers into QEC local memory. */
	cregs->rxwbufptr = cregs->rxrbufptr = qep->channel * gregs->msize;
	cregs->txwbufptr = cregs->txrbufptr = cregs->rxrbufptr + gregs->rsize;
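
	/* Each channel owns msize bytes of QEC local RAM: the receive FIFO
	 * sits at the start of that slice and the transmit FIFO follows
	 * rsize bytes in (see qec_init_once below).
	 */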

	/* Clear the channel collision counter. */
	cregs->ccnt = 0;

	/* For 10baseT, inter frame space nor throttle seems to be necessary. */
	cregs->pipg = 0;

	/* Now dork with the AMD MACE. */
	mregs->phyconfig = MREGS_PHYCONFIG_AUTO;
	mregs->txfcntl = MREGS_TXFCNTL_AUTOPAD; /* Save us some tx work. */

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	mregs->imask = (MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ);

	mregs->bconfig = (MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS);
	mregs->fconfig = (MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
			  MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU);

	/* Only usable interface on QuadEther is twisted pair. */
	mregs->plsconfig = (MREGS_PLSCONFIG_TP);

	/* Tell MACE we are changing the ether address. */
	mregs->iaconfig = (MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET);
	while ((mregs->iaconfig & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	mregs->ethaddr = e[0];
	mregs->ethaddr = e[1];
	mregs->ethaddr = e[2];
	mregs->ethaddr = e[3];
	mregs->ethaddr = e[4];
	mregs->ethaddr = e[5];
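
	/* The MACE exposes a single ETHADDR register: after the physical
	 * address reset above completes, the six octets of the station
	 * address are loaded by writing that register six times in a row.
	 */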

	/* Clear out the address filter. */
	mregs->iaconfig = (MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET);
	while ((mregs->iaconfig & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for(i = 0; i < 8; i++)
		mregs->filter = 0;

	/* Address changes are now complete. */
	mregs->iaconfig = 0;

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if(!(mregs->phyconfig & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (--tries) {
			mdelay(5);
			if((mregs->phyconfig & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if(!tries)
			printk("%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	garbage = mregs->mpcnt;

	/* Reload multicast information, this will enable the receiver
	 * and transmitter. But set the base mconfig value right now.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, unsigned int qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;
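
	/* Each error condition below is folded into net_stats; the ones
	 * known to wedge the MACE also set mace_hwbug_workaround, which
	 * forces a re-init of the channel before returning.
	 */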
	if(qe_status & CREG_STAT_EDEFER) {
		printk("%s: Excessive transmit defers.\n", dev->name);
		qep->net_stats.tx_errors++;
	}

	if(qe_status & CREG_STAT_CLOSS) {
		printk("%s: Carrier lost, link down?\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_carrier_errors++;
	}

	if(qe_status & CREG_STAT_ERETRIES) {
		printk("%s: Excessive transmit retries (more than 16).\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_LCOLL) {
		printk("%s: Late transmit collision.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_FUFLOW) {
		printk("%s: Transmit fifo underflow, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_JERROR) {
		printk("%s: Jabber error.\n", dev->name);
	}

	if(qe_status & CREG_STAT_BERROR) {
		printk("%s: Babble error.\n", dev->name);
	}

	if(qe_status & CREG_STAT_CCOFLOW) {
		qep->net_stats.tx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if(qe_status & CREG_STAT_TXDERROR) {
		printk("%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_TXLERR) {
		printk("%s: Transmit late error.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_TXPERR) {
		printk("%s: Transmit DMA parity error.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_TXSERR) {
		printk("%s: Transmit DMA sbus error ack.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_RCCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if(qe_status & CREG_STAT_RUOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_over_errors += 256;
	}

	if(qe_status & CREG_STAT_MCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_missed_errors += 256;
	}

	if(qe_status & CREG_STAT_RXFOFLOW) {
		printk("%s: Receive fifo overflow.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_over_errors++;
	}

	if(qe_status & CREG_STAT_RLCOLL) {
		printk("%s: Late receive collision.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.collisions++;
	}

	if(qe_status & CREG_STAT_FCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_frame_errors += 256;
	}

	if(qe_status & CREG_STAT_CECOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_crc_errors += 256;
	}

	if(qe_status & CREG_STAT_RXDROP) {
		printk("%s: Receive packet dropped.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_dropped++;
		qep->net_stats.rx_missed_errors++;
	}

	if(qe_status & CREG_STAT_RXSMALL) {
		printk("%s: Receive buffer too small, driver bug.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_length_errors++;
	}

	if(qe_status & CREG_STAT_RXLERR) {
		printk("%s: Receive late error.\n", dev->name);
		qep->net_stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_RXPERR) {
		printk("%s: Receive DMA parity error.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_RXSERR) {
		printk("%s: Receive DMA sbus error ack.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if(mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Frames land in the fixed
 * per-channel DVMA receive buffers; we allocate an skb for each good
 * frame and copy it out before handing it up the stack.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new, drops = 0;
	unsigned int flags;

	this = &rxbase[elem];
	while(!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if(len < ETH_ZLEN) {
			qep->net_stats.rx_errors++;
			qep->net_stats.rx_length_errors++;
			qep->net_stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if(skb == NULL) {
				drops++;
				qep->net_stats.rx_dropped++;
			} else {
				skb->dev = qep->dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);
				eth_copy_and_sum(skb, (unsigned char *)this_qbuf,
						 len, 0);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				qep->net_stats.rx_packets++;
				qep->net_stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if(drops)
		printk("%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static void qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sunqec *qecp = (struct sunqec *) dev_id;
	unsigned int qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = qecp->gregs->stat;
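
	/* The QEC global status register packs four status bits per channel;
	 * walk it a nibble at a time and service any channel with bits set.
	 */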
	while (channel < 4) {
		if(qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			struct net_device *dev = qep->dev;
			unsigned int qe_status;

			dev->interrupt = 1;

			qe_status = qep->qcregs->stat;
			if(qe_status & CREG_STAT_ERRORS)
				if(qe_is_bolixed(qep, qe_status))
					goto next;

			if(qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);

	next:
			dev->interrupt = 0;
		}
		qec_status >>= 4;
		channel++;
	}
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	int res;

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	res = qe_init(qep, 0);

	return res;
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring. */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	struct net_device *dev = qep->dev;
	int elem = qep->tx_old;

	while(elem != qep->tx_new) {
		unsigned int flags = txbase[elem].tx_flags;

		if(flags & TXD_OWN)
			break;
		qep->net_stats.tx_packets++;
		qep->net_stats.tx_bytes += (flags & TXD_LENGTH);
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;

	if(dev->tbusy && (TX_BUFFS_AVAIL(qep) > 0)) {
		dev->tbusy = 0;
		mark_bh(NET_BH);
	}
}

/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	qe_tx_reclaim(qep);

	if(test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
		long tickssofar = jiffies - dev->trans_start;

		if (tickssofar >= 40) {
			printk("%s: transmit timed out, resetting\n", dev->name);
			qe_init(qep, 0);
			dev->tbusy = 0;
			dev->trans_start = jiffies;
		}
		return 1;
	}

	if(!TX_BUFFS_AVAIL(qep))
		return 1;

	len = skb->len;
	entry = qep->tx_new;
	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
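	/* The descriptor is parked with TXD_UPDATE while the buffer and
	 * address are filled in; only the final tx_flags write below hands
	 * it to the chip by setting TXD_OWN.
	 */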
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	memcpy(txbuf, skb->data, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	dev->trans_start = jiffies;
	qep->qcregs->ctrl = CREG_CTRL_TWAKEUP;

	dev_kfree_skb(skb);

	if(TX_BUFFS_AVAIL(qep))
		dev->tbusy = 0;

	return 0;
}

static struct net_device_stats *qe_get_stats(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	return &qep->net_stats;
}

#define CRC_POLYNOMIAL_BE 0x04c11db7UL	/* Ethernet CRC, big endian */
#define CRC_POLYNOMIAL_LE 0xedb88320UL	/* Ethernet CRC, little endian */
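
/* Multicast filtering on the MACE uses a 64-bit logical address filter:
 * the CRC-32 (little-endian polynomial above) of each multicast address
 * selects one bit of hash_table, and the table is then loaded through
 * the eight MACE filter register writes below.
 */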
static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct dev_mc_list *dmi = dev->mc_list;
	unsigned char new_mconfig = qep->mconfig;
	char *addrs;
	int i, j, bit, byte;
	u32 crc, poly = CRC_POLYNOMIAL_LE;

	/* Lock out others. */
	set_bit(0, (void *) &dev->tbusy);

	if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		qep->mregs->iaconfig = MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET;
		while ((qep->mregs->iaconfig & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for(i = 0; i < 8; i++)
			qep->mregs->filter = 0xff;
		qep->mregs->iaconfig = 0;
	} else if(dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		unsigned char *hbytes = (unsigned char *) &hash_table[0];

		for(i = 0; i < 4; i++)
			hash_table[i] = 0;

		for(i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if(!(*addrs & 1))
				continue;

			crc = 0xffffffff;
			for(byte = 0; byte < 6; byte++) {
				for(bit = *addrs++, j = 0; j < 8; j++, bit >>= 1) {
					int test;

					test = ((bit ^ crc) & 0x01);
					crc >>= 1;
					if(test)
						crc = crc ^ poly;
				}
			}
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		/* Program the qe with the new filter value. */
		qep->mregs->iaconfig = MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET;
		while ((qep->mregs->iaconfig & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for(i = 0; i < 8; i++)
			qep->mregs->filter = *hbytes++;
		qep->mregs->iaconfig = 0;
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	qep->mregs->mconfig = qep->mconfig;

	/* Let us get going again. */
	dev->tbusy = 0;
}

/* This is only called once at boot time for each card probed. */
static inline void qec_init_once(struct sunqec *qecp, struct linux_sbus_device *qsdev)
{
	unsigned char bsizes = qecp->qec_bursts;

	if (bsizes & DMA_BURST64) {
		qecp->gregs->ctrl = GLOB_CTRL_B64;
	} else if(bsizes & DMA_BURST32) {
		qecp->gregs->ctrl = GLOB_CTRL_B32;
	} else {
		qecp->gregs->ctrl = GLOB_CTRL_B16;
	}
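
	/* The global control register is programmed for the largest SBUS
	 * burst size the card and bus both support: 64, then 32, then 16
	 * bytes as a fallback.
	 */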

	/* Packetsize only used in 100baseT BigMAC configurations,
	 * set it to zero just to be on the safe side.
	 */
	qecp->gregs->psize = 0;

	/* Set the local memsize register, divided up to one piece per QE channel. */
	qecp->gregs->msize = (qsdev->reg_addrs[1].reg_size >> 2);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	qecp->gregs->rsize = qecp->gregs->tsize =
		(qsdev->reg_addrs[1].reg_size >> 2) >> 1;
}

/* Four QE's per QEC card. */
static inline int qec_ether_init(struct net_device *dev, struct linux_sbus_device *sdev)
{
	static unsigned version_printed = 0;
	struct net_device *qe_devs[4];
	struct sunqe *qeps[4];
	struct linux_sbus_device *qesdevs[4];
	struct sunqec *qecp;
	struct linux_prom_ranges qranges[8];
	unsigned char bsizes, bsizes_more, num_qranges;
	int i, j, res = ENOMEM;

	dev = init_etherdev(0, sizeof(struct sunqe));
	qe_devs[0] = dev;
	qeps[0] = (struct sunqe *) dev->priv;
	qeps[0]->channel = 0;
	for(j = 0; j < 6; j++)
		qe_devs[0]->dev_addr[j] = idprom->id_ethaddr[j];

	if(version_printed++ == 0)
		printk(version);

	qe_devs[1] = qe_devs[2] = qe_devs[3] = NULL;
	for(i = 1; i < 4; i++) {
		qe_devs[i] = init_etherdev(0, sizeof(struct sunqe));
		if(qe_devs[i] == NULL || qe_devs[i]->priv == NULL)
			goto qec_free_devs;
		qeps[i] = (struct sunqe *) qe_devs[i]->priv;
		for(j = 0; j < 6; j++)
			qe_devs[i]->dev_addr[j] = idprom->id_ethaddr[j];
		qeps[i]->channel = i;
	}
	qecp = kmalloc(sizeof(struct sunqec), GFP_KERNEL);
	if(qecp == NULL)
		goto qec_free_devs;
	qecp->qec_sbus_dev = sdev;

	for(i = 0; i < 4; i++) {
		qecp->qes[i] = qeps[i];
		qeps[i]->dev = qe_devs[i];
		qeps[i]->parent = qecp;
	}
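
	/* Each "qe" child node carries a "channel#" property; below it is
	 * used to slot the SBUS device and its net_device into the right
	 * index instead of trusting the PROM sibling order.
	 */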

	/* Link in channel 0. */
	i = prom_getintdefault(sdev->child->prom_node, "channel#", -1);
	if(i == -1) { res=ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child;
	qe_devs[i]->base_addr = (long) qesdevs[i];

	/* Link in channel 1. */
	i = prom_getintdefault(sdev->child->next->prom_node, "channel#", -1);
	if(i == -1) { res=ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child->next;
	qe_devs[i]->base_addr = (long) qesdevs[i];

	/* Link in channel 2. */
	i = prom_getintdefault(sdev->child->next->next->prom_node, "channel#", -1);
	if(i == -1) { res=ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child->next->next;
	qe_devs[i]->base_addr = (long) qesdevs[i];

	/* Link in channel 3. */
	i = prom_getintdefault(sdev->child->next->next->next->prom_node, "channel#", -1);
	if(i == -1) { res=ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child->next->next->next;
	qe_devs[i]->base_addr = (long) qesdevs[i];

	for(i = 0; i < 4; i++)
		qeps[i]->qe_sbusdev = qesdevs[i];

	/* This is a bit of fun, get QEC ranges. */
	i = prom_getproperty(sdev->prom_node, "ranges",
			     (char *) &qranges[0], sizeof(qranges));
	num_qranges = (i / sizeof(struct linux_prom_ranges));

	/* Now, apply all the ranges, QEC ranges then the SBUS ones for each QE. */
	if (sdev->ranges_applied == 0) {
		for(i = 0; i < 4; i++) {
			for(j = 0; j < 2; j++) {
				int k;

				for(k = 0; k < num_qranges; k++)
					if(qesdevs[i]->reg_addrs[j].which_io ==
					   qranges[k].ot_child_space)
						break;
				if(k >= num_qranges)
					printk("QuadEther: Aieee, bogus QEC range for "
					       "space %08x\n",qesdevs[i]->reg_addrs[j].which_io);
				qesdevs[i]->reg_addrs[j].which_io = qranges[k].ot_parent_space;
				qesdevs[i]->reg_addrs[j].phys_addr += qranges[k].ot_parent_base;
			}

			prom_apply_sbus_ranges(qesdevs[i]->my_bus, &qesdevs[i]->reg_addrs[0],
					       qesdevs[i]->num_registers, qesdevs[i]);
		}

		prom_apply_sbus_ranges(sdev->my_bus, &sdev->reg_addrs[0],
				       sdev->num_registers, sdev);
	}

	/* Now map in the registers, QEC globals first. */
	qecp->gregs = sparc_alloc_io(sdev->reg_addrs[0].phys_addr, 0,
				     sizeof(struct qe_globreg),
				     "QEC Global Registers",
				     sdev->reg_addrs[0].which_io, 0);
	if(!qecp->gregs) {
		printk("QuadEther: Cannot map QEC global registers.\n");
		res = ENODEV;
		goto qec_free_devs;
	}

	/* Make sure the QEC is in MACE mode. */
	if((qecp->gregs->ctrl & 0xf0000000) != GLOB_CTRL_MMODE) {
		printk("QuadEther: AIEEE, QEC is not in MACE mode!\n");
		res = ENODEV;
		goto qec_free_devs;
	}

	if(qec_global_reset(qecp->gregs)) {
		res = ENODEV;
		goto qec_free_devs;
	}

	/* Find and set the burst sizes for the QEC, since it does
	 * the actual dma for all 4 channels.
	 */
	bsizes = prom_getintdefault(sdev->prom_node, "burst-sizes", 0xff);
	bsizes_more = prom_getintdefault(sdev->my_bus->prom_node, "burst-sizes", 0xff);

	if(bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if(bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	   (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);
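
	/* bsizes now holds the burst mask handed to qec_init_once(); if the
	 * PROM gave nothing usable it has been forced down to a conservative
	 * default above.
	 */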
	qecp->qec_bursts = bsizes;

	/* Perform one time QEC initialization, we never touch the QEC
	 * globals again after this.
	 */
	qec_init_once(qecp, sdev);

	for(i = 0; i < 4; i++) {
		/* Map in QEC per-channel control registers. */
		qeps[i]->qcregs = sparc_alloc_io(qesdevs[i]->reg_addrs[0].phys_addr, 0,
						 sizeof(struct qe_creg),
						 "QEC Per-Channel Registers",
						 qesdevs[i]->reg_addrs[0].which_io, 0);
		if(!qeps[i]->qcregs) {
			printk("QuadEther: Cannot map QE %d's channel registers.\n", i);
			res = ENODEV;
			goto qec_free_devs;
		}

		/* Map in per-channel AMD MACE registers. */
		qeps[i]->mregs = sparc_alloc_io(qesdevs[i]->reg_addrs[1].phys_addr, 0,
						sizeof(struct qe_mregs),
						"QE MACE Registers",
						qesdevs[i]->reg_addrs[1].which_io, 0);
		if(!qeps[i]->mregs) {
			printk("QuadEther: Cannot map QE %d's MACE registers.\n", i);
			res = ENODEV;
			goto qec_free_devs;
		}

		qeps[i]->qe_block = (struct qe_init_block *)
			sparc_dvma_malloc(PAGE_SIZE, "QE Init Block",
					  &qeps[i]->qblock_dvma);

		qeps[i]->buffers = (struct sunqe_buffers *)
			sparc_dvma_malloc(sizeof(struct sunqe_buffers),
					  "QE RX/TX Buffers",
					  &qeps[i]->buffers_dvma);

		/* Stop this QE. */
		qe_stop(qeps[i]);
	}

	for(i = 0; i < 4; i++) {
		qe_devs[i]->open = qe_open;
		qe_devs[i]->stop = qe_close;
		qe_devs[i]->hard_start_xmit = qe_start_xmit;
		qe_devs[i]->get_stats = qe_get_stats;
		qe_devs[i]->set_multicast_list = qe_set_multicast;
		qe_devs[i]->irq = sdev->irqs[0];
		qe_devs[i]->dma = 0;
		ether_setup(qe_devs[i]);
	}

	/* QEC receives interrupts from each QE, then it sends the actual
	 * IRQ to the cpu itself.  Since QEC is the single point of
	 * interrupt for all QE channels we register the IRQ handler
	 * for it.
	 */
	if(request_irq(sdev->irqs[0], &qec_interrupt,
		       SA_SHIRQ, "QuadEther", (void *) qecp)) {
		printk("QuadEther: Can't register QEC master irq handler.\n");
		res = EAGAIN;
		goto qec_free_devs;
	}

	/* Report the QE channels. */
	for(i = 0; i < 4; i++) {
		printk("%s: QuadEthernet channel[%d] ", qe_devs[i]->name, i);
		for(j = 0; j < 6; j++)
			printk("%2.2x%c",
			       qe_devs[i]->dev_addr[j],
			       j == 5 ? ' ' : ':');
		printk("\n");
	}

	/* We are home free at this point, link the qe's into
	 * the master list for later module unloading.
	 */
	for(i = 0; i < 4; i++)
		qe_devs[i]->ifindex = dev_new_index();
	qecp->next_module = root_qec_dev;
	root_qec_dev = qecp;

	return 0;

qec_free_devs:
	for(i = 0; i < 4; i++) {
		if(qe_devs[i] != NULL) {
			if(qe_devs[i]->priv)
				kfree(qe_devs[i]->priv);
			kfree(qe_devs[i]);
		}
	}
	return -res;
}

int __init qec_probe(struct net_device *dev)
{
	struct linux_sbus *bus;
	struct linux_sbus_device *sdev = 0;
	static int called = 0;
	int cards = 0, v;

	if(called)
		return -ENODEV;
	called++;

	for_each_sbus(bus) {
		for_each_sbusdev(sdev, bus) {
			if(cards) dev = NULL;

			/* QEC can be parent of either QuadEthernet or BigMAC
			 * children.
			 */
			if(!strcmp(sdev->prom_name, "qec") && sdev->child &&
			   !strcmp(sdev->child->prom_name, "qe") &&
			   sdev->child->next &&
			   !strcmp(sdev->child->next->prom_name, "qe") &&
			   sdev->child->next->next &&
			   !strcmp(sdev->child->next->next->prom_name, "qe") &&
			   sdev->child->next->next->next &&
			   !strcmp(sdev->child->next->next->next->prom_name, "qe")) {
				cards++;
				if((v = qec_ether_init(dev, sdev)))
					return v;
			}
		}
	}
	if(!cards)
		return -ENODEV;
	return 0;
}

#ifdef MODULE

int
init_module(void)
{
	return qec_probe(NULL);
}

void
cleanup_module(void)
{
	struct sunqec *next_qec;
	int i;

	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	while (root_qec_dev) {
		next_qec = root_qec_dev->next_module;

		/* Release all four QE channels, then the QEC itself. */
		for(i = 0; i < 4; i++) {
			unregister_netdev(root_qec_dev->qes[i]->dev);
			sparc_free_io(root_qec_dev->qes[i]->qcregs, sizeof(struct qe_creg));
			sparc_free_io(root_qec_dev->qes[i]->mregs, sizeof(struct qe_mregs));
			kfree(root_qec_dev->qes[i]->dev);
		}
		free_irq(root_qec_dev->qec_sbus_dev->irqs[0], (void *)root_qec_dev);
		sparc_free_io(root_qec_dev->gregs, sizeof(struct qe_globreg));
		kfree(root_qec_dev);
		root_qec_dev = next_qec;
	}
}

#endif /* MODULE */