/*	$NetBSD: qe.c,v 1.55 2009/09/19 11:53:42 tsutsui Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 *
 * This code is derived from software contributed to The NetBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1998 Jason L. Wright.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Driver for the SBus qec+qe QuadEthernet board.
 *
 * This driver was written using the AMD MACE Am79C940 documentation, some
 * ideas gleaned from the S/Linux driver for this card, Solaris header files,
 * and a loan of a card from Paul Southworth of the Internet Engineering
 * Group (www.ieng.com).
 */
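/*
 * A note on the hardware layout (editor's sketch, not original source):
 * the QEC is the SBus DMA engine and buffer memory shared by up to four
 * MACE (Am79C940) Ethernet channels; each qe instance drives one channel
 * of its QEC parent and gets a per-channel slice of the QEC's local
 * buffer memory (see the QE_CRI_RXWBUF/QE_CRI_TXWBUF setup in qeinit()).
 */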
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qe.c,v 1.55 2009/09/19 11:53:42 tsutsui Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <machine/bus.h>
#include <sys/intr.h>
#include <machine/autoconf.h>

#include <dev/sbus/sbusvar.h>
#include <dev/sbus/qecreg.h>
#include <dev/sbus/qecvar.h>
#include <dev/sbus/qereg.h>
struct qe_softc {
        struct device sc_dev;           /* base device */
        bus_space_tag_t sc_bustag;      /* bus & DMA tags */
        bus_dma_tag_t sc_dmatag;
        bus_dmamap_t sc_dmamap;
        struct ethercom sc_ethercom;
        struct ifmedia sc_ifmedia;      /* interface media */

        struct qec_softc *sc_qec;       /* QEC parent */

        bus_space_handle_t sc_qr;       /* QEC registers */
        bus_space_handle_t sc_mr;       /* MACE registers */
        bus_space_handle_t sc_cr;       /* channel registers */

        int sc_channel;                 /* channel number */
        u_int sc_rev;                   /* board revision */

        int sc_burst;

        struct qec_ring sc_rb;          /* Packet Ring Buffer */

        /* MAC address */
        uint8_t sc_enaddr[6];

        int sc_debug;
};
#define MC_POLY_LE      0xedb88320      /* mcast crc, little endian */

int     qematch(device_t, cfdata_t, void *);
void    qeattach(device_t, device_t, void *);

void    qeinit(struct qe_softc *);
void    qestart(struct ifnet *);
void    qestop(struct qe_softc *);
void    qewatchdog(struct ifnet *);
int     qeioctl(struct ifnet *, u_long, void *);
void    qereset(struct qe_softc *);

int     qeintr(void *);
int     qe_eint(struct qe_softc *, uint32_t);
int     qe_rint(struct qe_softc *);
int     qe_tint(struct qe_softc *);
void    qe_mcreset(struct qe_softc *);

static int      qe_put(struct qe_softc *, int, struct mbuf *);
static void     qe_read(struct qe_softc *, int, int);
static struct mbuf      *qe_get(struct qe_softc *, int, int);

/* ifmedia callbacks */
void    qe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int     qe_ifmedia_upd(struct ifnet *);
CFATTACH_DECL(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);
int
qematch(device_t parent, cfdata_t cf, void *aux)
{
        struct sbus_attach_args *sa = aux;

        return (strcmp(cf->cf_name, sa->sa_name) == 0);
}
void
qeattach(device_t parent, device_t self, void *aux)
{
        struct sbus_attach_args *sa = aux;
        struct qec_softc *qec = device_private(parent);
        struct qe_softc *sc = device_private(self);
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        int node = sa->sa_node;
        bus_dma_tag_t dmatag = sa->sa_dmatag;
        bus_dma_segment_t seg;
        bus_size_t size;
        int rseg, error;

        if (sa->sa_nreg < 2) {
                printf("%s: only %d register sets\n",
                    device_xname(self), sa->sa_nreg);
                return;
        }

        if (bus_space_map(sa->sa_bustag,
            (bus_addr_t)BUS_ADDR(
                sa->sa_reg[0].oa_space,
                sa->sa_reg[0].oa_base),
            (bus_size_t)sa->sa_reg[0].oa_size,
            0, &sc->sc_cr) != 0) {
                aprint_error_dev(self, "cannot map registers\n");
                return;
        }

        if (bus_space_map(sa->sa_bustag,
            (bus_addr_t)BUS_ADDR(
                sa->sa_reg[1].oa_space,
                sa->sa_reg[1].oa_base),
            (bus_size_t)sa->sa_reg[1].oa_size,
            0, &sc->sc_mr) != 0) {
                aprint_error_dev(self, "cannot map registers\n");
                return;
        }

        sc->sc_rev = prom_getpropint(node, "mace-version", -1);
        printf(" rev %x", sc->sc_rev);

        sc->sc_bustag = sa->sa_bustag;
        sc->sc_dmatag = sa->sa_dmatag;
        sc->sc_qec = qec;
        sc->sc_qr = qec->sc_regs;

        sc->sc_channel = prom_getpropint(node, "channel#", -1);
        sc->sc_burst = qec->sc_burst;

        qestop(sc);

        /* Note: no interrupt level passed */
        (void)bus_intr_establish(sa->sa_bustag, 0, IPL_NET, qeintr, sc);
        prom_getether(node, sc->sc_enaddr);

        /*
         * Allocate descriptor ring and buffers.
         */

        /* for now, allocate as many bufs as there are ring descriptors */
        sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
        sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

        size =  QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
                QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
                sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
                sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;
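        /*
         * Layout sketch of the DMA area sized above (editor's note, not
         * original source; the actual carving is done by qec_meminit()
         * from qeinit()), following the order of the terms in `size':
         *
         *      rb_dmabase -> +--------------------------------+
         *                    | TX and RX descriptor rings     |
         *                    +--------------------------------+
         *                    | rb_ntbuf * QE_PKT_BUF_SZ  (TX) |
         *                    +--------------------------------+
         *                    | rb_nrbuf * QE_PKT_BUF_SZ  (RX) |
         *                    +--------------------------------+
         */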
        /* Get a DMA handle */
        if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
            BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
                aprint_error_dev(self, "DMA map create error %d\n", error);
                return;
        }

        /* Allocate DMA buffer */
        if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
            &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
                aprint_error_dev(self, "DMA buffer alloc error %d\n", error);
                return;
        }

        /* Map DMA buffer in CPU addressable space */
        if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
            &sc->sc_rb.rb_membase,
            BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
                aprint_error_dev(self, "DMA buffer map error %d\n", error);
                bus_dmamem_free(dmatag, &seg, rseg);
                return;
        }

        /* Load the buffer */
        if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
            sc->sc_rb.rb_membase, size, NULL,
            BUS_DMA_NOWAIT)) != 0) {
                aprint_error_dev(self, "DMA buffer map load error %d\n",
                    error);
                bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
                bus_dmamem_free(dmatag, &seg, rseg);
                return;
        }

        sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

        /* Initialize media properties */
        ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
        ifmedia_add(&sc->sc_ifmedia,
            IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, 0), 0, NULL);
        ifmedia_add(&sc->sc_ifmedia,
            IFM_MAKEWORD(IFM_ETHER, IFM_10_5, 0, 0), 0, NULL);
        ifmedia_add(&sc->sc_ifmedia,
            IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0, NULL);
        ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

        memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
        ifp->if_softc = sc;
        ifp->if_start = qestart;
        ifp->if_ioctl = qeioctl;
        ifp->if_watchdog = qewatchdog;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS |
            IFF_MULTICAST;
        IFQ_SET_READY(&ifp->if_snd);

        /* Attach the interface. */
        if_attach(ifp);
        ether_ifattach(ifp, sc->sc_enaddr);

        printf(" address %s\n", ether_sprintf(sc->sc_enaddr));
}
/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs.  When full cluster sized units are present,
 * we copy into clusters.
 */
static inline struct mbuf *
qe_get(struct qe_softc *sc, int idx, int totlen)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct mbuf *m;
        struct mbuf *top, **mp;
        int len, pad, boff = 0;
        uint8_t *bp;

        bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return (NULL);
        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = totlen;
        pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
        m->m_data += pad;
        len = MHLEN - pad;
        top = NULL;
        mp = &top;

        while (totlen > 0) {
                if (top) {
                        MGET(m, M_DONTWAIT, MT_DATA);
                        if (m == NULL) {
                                m_freem(top);
                                return (NULL);
                        }
                        len = MLEN;
                }
                if (top && totlen >= MINCLSIZE) {
                        MCLGET(m, M_DONTWAIT);
                        if (m->m_flags & M_EXT)
                                len = MCLBYTES;
                }
                m->m_len = len = min(totlen, len);
                memcpy(mtod(m, void *), bp + boff, len);
                boff += len;
                totlen -= len;
                *mp = m;
                mp = &m->m_next;
        }

        return (top);
}
/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 */
static int
qe_put(struct qe_softc *sc, int idx, struct mbuf *m)
{
        struct mbuf *n;
        int len, tlen = 0, boff = 0;
        uint8_t *bp;

        bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;

        for (; m; m = n) {
                len = m->m_len;
                if (len == 0) {
                        MFREE(m, n);
                        continue;
                }
                memcpy(bp + boff, mtod(m, void *), len);
                boff += len;
                tlen += len;
                MFREE(m, n);
        }
        return (tlen);
}
/*
 * Pass a packet to the higher levels.
 */
static void
qe_read(struct qe_softc *sc, int idx, int len)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct mbuf *m;

        if (len <= sizeof(struct ether_header) ||
            len > ETHERMTU + sizeof(struct ether_header)) {

                printf("%s: invalid packet size %d; dropping\n",
                    ifp->if_xname, len);

                ifp->if_ierrors++;
                return;
        }

        /*
         * Pull packet off interface.
         */
        m = qe_get(sc, idx, len);
        if (m == NULL) {
                ifp->if_ierrors++;
                return;
        }
        ifp->if_ipackets++;

        /*
         * Check if there's a BPF listener on this interface.
         * If so, hand off the raw packet to BPF.
         */
        if (ifp->if_bpf)
                bpf_mtap(ifp->if_bpf, m);

        /* Pass the packet up. */
        (*ifp->if_input)(ifp, m);
}
/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e. that the output part of the interface is idle)
 */
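/*
 * Editor's sketch (not original source) of the caller pattern implied by
 * assumption 1) above:
 *
 *      int s = splnet();
 *      qestart(ifp);
 *      splx(s);
 */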
void
qestart(struct ifnet *ifp)
{
        struct qe_softc *sc = ifp->if_softc;
        struct qec_xd *txd = sc->sc_rb.rb_txd;
        struct mbuf *m;
        unsigned int bix, len;
        unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
                return;

        bix = sc->sc_rb.rb_tdhead;

        for (;;) {
                IFQ_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL)
                        break;

                /*
                 * If BPF is listening on this interface, let it see the
                 * packet before we commit it to the wire.
                 */
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m);

                /*
                 * Copy the mbuf chain into the transmit buffer.
                 */
                len = qe_put(sc, bix, m);

                /*
                 * Initialize transmit registers and start transmission
                 */
                txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
                    (len & QEC_XD_LENGTH);
                bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
                    QE_CR_CTRL_TWAKEUP);

                if (++bix == QEC_XD_RING_MAXSIZE)
                        bix = 0;

                if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }
        }

        sc->sc_rb.rb_tdhead = bix;
}
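/*
 * Editor's note on the descriptor handshake above (not original source):
 * qestart() passes a frame to the QEC by setting QEC_XD_OWN in its ring
 * descriptor and poking QE_CRI_CTRL; the chip clears QEC_XD_OWN once the
 * frame is on the wire, and qe_tint() reclaims only descriptors whose
 * QEC_XD_OWN bit has been cleared.  The receive ring in qe_rint() uses
 * the same convention with the roles reversed.
 */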
void
qestop(struct qe_softc *sc)
{
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t mr = sc->sc_mr;
        bus_space_handle_t cr = sc->sc_cr;
        int n;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif
        /* Stop the schwurst */
        bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
        for (n = 200; n > 0; n--) {
                if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
                    QE_MR_BIUCC_SWRST) == 0)
                        break;
                DELAY(20);
        }

        /* then reset the channel */
        bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
        for (n = 200; n > 0; n--) {
                if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
                    QE_CR_CTRL_RESET) == 0)
                        break;
                DELAY(20);
        }
}
/*
 * Reset interface.
 */
void
qereset(struct qe_softc *sc)
{
        int s;

        s = splnet();
        qestop(sc);
        qeinit(sc);
        splx(s);
}
void
qewatchdog(struct ifnet *ifp)
{
        struct qe_softc *sc = ifp->if_softc;

        log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->sc_dev));
        ifp->if_oerrors++;

        qereset(sc);
}
/*
 * Interrupt dispatch.
 */
int
qeintr(void *arg)
{
        struct qe_softc *sc = arg;
        bus_space_tag_t t = sc->sc_bustag;
        uint32_t qecstat, qestat;
        int r = 0;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif
        /* Read QEC status and channel status */
        qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
        if (sc->sc_debug) {
                printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
        }
#endif

        /* Filter out status for this channel */
        qecstat = qecstat >> (4 * sc->sc_channel);
        if ((qecstat & 0xf) == 0)
                return (r);

        qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
        if (sc->sc_debug) {
                char bits[64]; int i;
                bus_space_tag_t t1 = sc->sc_bustag;
                bus_space_handle_t mr = sc->sc_mr;

                snprintb(bits, sizeof(bits), QE_CR_STAT_BITS, qestat);
                printf("qe%d: intr: qestat=%s\n", sc->sc_channel, bits);

                printf("MACE registers:\n");
                for (i = 0 ; i < 32; i++) {
                        printf("  m[%d]=%x,", i, bus_space_read_1(t1, mr, i));
                        if (((i+1) & 7) == 0)
                                printf("\n");
                }
        }
#endif

        if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
                if (sc->sc_debug) {
                        char bits[64];
                        snprintb(bits, sizeof(bits), QE_CR_STAT_BITS, qestat);
                        printf("qe%d: eint: qestat=%s\n",
                            sc->sc_channel, bits);
                }
#endif
                r |= qe_eint(sc, qestat);
                if (r == -1)
                        return (1);
        }

        if (qestat & QE_CR_STAT_TXIRQ)
                r |= qe_tint(sc);

        if (qestat & QE_CR_STAT_RXIRQ)
                r |= qe_rint(sc);

        return (r);
}
/*
 * Transmit interrupt.
 */
int
qe_tint(struct qe_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        unsigned int bix, txflags;

        bix = sc->sc_rb.rb_tdtail;

        for (;;) {
                if (sc->sc_rb.rb_td_nbusy <= 0)
                        break;

                txflags = sc->sc_rb.rb_txd[bix].xd_flags;

                if (txflags & QEC_XD_OWN)
                        break;

                ifp->if_flags &= ~IFF_OACTIVE;
                ifp->if_opackets++;

                if (++bix == QEC_XD_RING_MAXSIZE)
                        bix = 0;

                --sc->sc_rb.rb_td_nbusy;
        }

        sc->sc_rb.rb_tdtail = bix;

        qestart(ifp);

        if (sc->sc_rb.rb_td_nbusy == 0)
                ifp->if_timer = 0;

        return (1);
}
/*
 * Receive interrupt.
 */
int
qe_rint(struct qe_softc *sc)
{
        struct qec_xd *xd = sc->sc_rb.rb_rxd;
        unsigned int bix, len;
        unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
        int npackets = 0;

        bix = sc->sc_rb.rb_rdtail;

        /*
         * Process all buffers with valid data.
         */
        for (;;) {
                len = xd[bix].xd_flags;
                if (len & QEC_XD_OWN)
                        break;

                npackets++;

                len &= QEC_XD_LENGTH;
                qe_read(sc, bix, len);

                /*
                 * Re-own the descriptor that uses the buffer just
                 * drained (nrbuf descriptors ahead in the ring) so the
                 * chip can fill it again.
                 */
                xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
                    QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

                if (++bix == QEC_XD_RING_MAXSIZE)
                        bix = 0;
        }
#ifdef QEDEBUG
        if (npackets == 0 && sc->sc_debug)
                printf("%s: rint: no packets; rb index %d; status 0x%x\n",
                    device_xname(&sc->sc_dev), bix, len);
#endif

        sc->sc_rb.rb_rdtail = bix;

        return (1);
}
/*
 * Error interrupt.
 */
int
qe_eint(struct qe_softc *sc, uint32_t why)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        device_t self = &sc->sc_dev;
        int r = 0, rst = 0;

        if (why & QE_CR_STAT_EDEFER) {
                printf("%s: excessive tx defers.\n", device_xname(self));
                r |= 1;
                ifp->if_oerrors++;
        }

        if (why & QE_CR_STAT_CLOSS) {
                printf("%s: no carrier, link down?\n", device_xname(self));
                ifp->if_oerrors++;
                r |= 1;
        }

        if (why & QE_CR_STAT_ERETRIES) {
                printf("%s: excessive tx retries\n", device_xname(self));
                ifp->if_oerrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_LCOLL) {
                printf("%s: late tx transmission\n", device_xname(self));
                ifp->if_oerrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_FUFLOW) {
                printf("%s: tx fifo underflow\n", device_xname(self));
                ifp->if_oerrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_JERROR) {
                printf("%s: jabber seen\n", device_xname(self));
                r |= 1;
        }

        if (why & QE_CR_STAT_BERROR) {
                printf("%s: babble seen\n", device_xname(self));
                r |= 1;
        }

        if (why & QE_CR_STAT_TCCOFLOW) {
                ifp->if_collisions += 256;
                ifp->if_oerrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_TXDERROR) {
                printf("%s: tx descriptor is bad\n", device_xname(self));
                rst = 1;
                r |= 1;
        }

        if (why & QE_CR_STAT_TXLERR) {
                printf("%s: tx late error\n", device_xname(self));
                ifp->if_oerrors++;
                rst = 1;
                r |= 1;
        }

        if (why & QE_CR_STAT_TXPERR) {
                printf("%s: tx DMA parity error\n", device_xname(self));
                ifp->if_oerrors++;
                rst = 1;
                r |= 1;
        }

        if (why & QE_CR_STAT_TXSERR) {
                printf("%s: tx DMA sbus error ack\n", device_xname(self));
                ifp->if_oerrors++;
                rst = 1;
                r |= 1;
        }

        if (why & QE_CR_STAT_RCCOFLOW) {
                ifp->if_collisions += 256;
                ifp->if_ierrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_RUOFLOW) {
                ifp->if_ierrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_MCOFLOW) {
                ifp->if_ierrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_RXFOFLOW) {
                printf("%s: rx fifo overflow\n", device_xname(self));
                ifp->if_ierrors++;
                r |= 1;
        }

        if (why & QE_CR_STAT_RLCOLL) {
                printf("%s: rx late collision\n", device_xname(self));
                ifp->if_ierrors++;
                ifp->if_collisions++;
                r |= 1;
        }

        if (why & QE_CR_STAT_FCOFLOW) {
                ifp->if_ierrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_CECOFLOW) {
                ifp->if_ierrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_RXDROP) {
                printf("%s: rx packet dropped\n", device_xname(self));
                ifp->if_ierrors++;
                r |= 1;
        }

        if (why & QE_CR_STAT_RXSMALL) {
                printf("%s: rx buffer too small\n", device_xname(self));
                ifp->if_ierrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_RXLERR) {
                printf("%s: rx late error\n", device_xname(self));
                ifp->if_ierrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_RXPERR) {
                printf("%s: rx DMA parity error\n", device_xname(self));
                ifp->if_ierrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_RXSERR) {
                printf("%s: rx DMA sbus error ack\n", device_xname(self));
                ifp->if_ierrors++;
                r |= 1;
                rst = 1;
        }

        if (r == 0)
                aprint_error_dev(self, "unexpected interrupt error: %08x\n",
                    why);

        if (rst) {
                printf("%s: resetting...\n", device_xname(self));
                qereset(sc);
                return (-1);
        }

        return (r);
}
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
        struct qe_softc *sc = ifp->if_softc;
        struct ifaddr *ifa = data;
        struct ifreq *ifr = data;
        int s, error = 0;

        s = splnet();

        switch (cmd) {
        case SIOCINITIFADDR:
                ifp->if_flags |= IFF_UP;
                qeinit(sc);
                switch (ifa->ifa_addr->sa_family) {
#ifdef INET
                case AF_INET:
                        arp_ifinit(ifp, ifa);
                        break;
#endif /* INET */
                default:
                        break;
                }
                break;

        case SIOCSIFFLAGS:
                if ((error = ifioctl_common(ifp, cmd, data)) != 0)
                        break;
                /* XXX re-use ether_ioctl() */
                switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
                case IFF_RUNNING:
                        /*
                         * If interface is marked down and it is running, then
                         * stop it.
                         */
                        qestop(sc);
                        ifp->if_flags &= ~IFF_RUNNING;
                        break;
                case IFF_UP:
                        /*
                         * If interface is marked up and it is stopped, then
                         * start it.
                         */
                        qeinit(sc);
                        break;
                default:
                        /*
                         * Reset the interface to pick up changes in any other
                         * flags that affect hardware registers.
                         */
                        qestop(sc);
                        qeinit(sc);
                        break;
                }
                sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
                        /*
                         * Multicast list has changed; set the hardware filter
                         * accordingly.
                         */
                        if (ifp->if_flags & IFF_RUNNING)
                                qe_mcreset(sc);
                        error = 0;
                }
                break;

        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
                break;

        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }

        splx(s);
        return (error);
}
void
qeinit(struct qe_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t cr = sc->sc_cr;
        bus_space_handle_t mr = sc->sc_mr;
        struct qec_softc *qec = sc->sc_qec;
        uint32_t qecaddr;
        uint8_t *ea;
        int s;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif
        s = splnet();

        qestop(sc);

        /*
         * Allocate descriptor ring and buffers
         */
        qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

        /* Channel registers: */
        bus_space_write_4(t, cr, QE_CRI_RXDS, (uint32_t)sc->sc_rb.rb_rxddma);
        bus_space_write_4(t, cr, QE_CRI_TXDS, (uint32_t)sc->sc_rb.rb_txddma);

        bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
        bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
        bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
        bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
        bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
        bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

        /*
         * Each channel gets a `sc_msize'-sized slice of the QEC's local
         * buffer memory: the first `sc_rsize' bytes for receive, the
         * remainder for transmit.
         */
        qecaddr = sc->sc_channel * qec->sc_msize;
        bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
        bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
        bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
        bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

        /* MACE registers: */
        bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
        bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
        bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

        /*
         * Mask MACE's receive interrupt, since we're being notified
         * by the QEC after DMA completes.
         */
        bus_space_write_1(t, mr, QE_MRI_IMR,
            QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

        bus_space_write_1(t, mr, QE_MRI_BIUCC,
            QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

        bus_space_write_1(t, mr, QE_MRI_FIFOFC,
            QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
            QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

        bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

        /*
         * Station address
         */
        ea = sc->sc_enaddr;
        bus_space_write_1(t, mr, QE_MRI_IAC,
            QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
        bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

        /* Apply media settings */
        qe_ifmedia_upd(ifp);

        /*
         * Clear Logical address filter
         */
        bus_space_write_1(t, mr, QE_MRI_IAC,
            QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
        bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
        bus_space_write_1(t, mr, QE_MRI_IAC, 0);

        /* Clear missed packet count (register cleared on read) */
        (void)bus_space_read_1(t, mr, QE_MRI_MPC);

        /* test register: */
        bus_space_write_1(t, mr, QE_MRI_UTR, 0);

        /* Reset multicast filter */
        qe_mcreset(sc);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
        splx(s);
}
/*
 * Reset multicast filter.
 */
void
qe_mcreset(struct qe_softc *sc)
{
        struct ethercom *ec = &sc->sc_ethercom;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t mr = sc->sc_mr;
        struct ether_multi *enm;
        struct ether_multistep step;
        uint32_t crc;
        uint16_t hash[4];
        uint8_t octet, maccc, *ladrp = (uint8_t *)&hash[0];
        int i, j;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif

        /* We also enable transmitter & receiver here */
        maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

        if (ifp->if_flags & IFF_PROMISC) {
                maccc |= QE_MR_MACCC_PROM;
                bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
                return;
        }

        if (ifp->if_flags & IFF_ALLMULTI) {
                bus_space_write_1(t, mr, QE_MRI_IAC,
                    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
                bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
                bus_space_write_1(t, mr, QE_MRI_IAC, 0);
                bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
                return;
        }

        hash[3] = hash[2] = hash[1] = hash[0] = 0;

        ETHER_FIRST_MULTI(step, ec, enm);
        while (enm != NULL) {
                if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
                    ETHER_ADDR_LEN) != 0) {
                        /*
                         * We must listen to a range of multicast
                         * addresses. For now, just accept all
                         * multicasts, rather than trying to set only
                         * those filter bits needed to match the range.
                         * (At this time, the only use of address
                         * ranges is for IP multicast routing, for
                         * which the range is big enough to require
                         * all bits set.)
                         */
                        bus_space_write_1(t, mr, QE_MRI_IAC,
                            QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
                        bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
                        bus_space_write_1(t, mr, QE_MRI_IAC, 0);
                        ifp->if_flags |= IFF_ALLMULTI;
                        break;
                }

                /*
                 * Compute the LSB-first CRC-32 of the address; its top
                 * 6 bits select one of the 64 logical address filter bits.
                 */
                crc = 0xffffffff;

                for (i = 0; i < ETHER_ADDR_LEN; i++) {
                        octet = enm->enm_addrlo[i];

                        for (j = 0; j < 8; j++) {
                                if ((crc & 1) ^ (octet & 1)) {
                                        crc >>= 1;
                                        crc ^= MC_POLY_LE;
                                } else
                                        crc >>= 1;
                                octet >>= 1;
                        }
                }

                crc >>= 26;
                hash[crc >> 4] |= 1 << (crc & 0xf);
                ETHER_NEXT_MULTI(step, enm);
        }

        bus_space_write_1(t, mr, QE_MRI_IAC,
            QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
        bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
        bus_space_write_1(t, mr, QE_MRI_IAC, 0);
        bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}
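/*
 * Editor's sketch (not part of the original driver): the LADRF bit
 * selection performed by the loop in qe_mcreset() above, written as a
 * standalone helper.  The MACE hashes a multicast destination address
 * to one of 64 filter bits by taking the top 6 bits of the LSB-first
 * Ethernet CRC-32 of the address.
 */
#ifdef notdef
static u_int
qe_mchash(const uint8_t addr[ETHER_ADDR_LEN])
{
        uint32_t crc = 0xffffffff;
        int i, j;

        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                uint8_t octet = addr[i];

                for (j = 0; j < 8; j++) {
                        /* same update as qe_mcreset(): reflected CRC-32 */
                        if ((crc ^ octet) & 1)
                                crc = (crc >> 1) ^ MC_POLY_LE;
                        else
                                crc >>= 1;
                        octet >>= 1;
                }
        }
        return (crc >> 26);     /* bit index into the 64-bit LADRF */
}
#endif /* notdef */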
/*
 * Get current media settings.
 */
void
qe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct qe_softc *sc = ifp->if_softc;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t mr = sc->sc_mr;
        uint8_t v;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif
        v = bus_space_read_1(t, mr, QE_MRI_PLSCC);

        switch (bus_space_read_1(t, mr, QE_MRI_PLSCC) & QE_MR_PLSCC_PORTMASK) {
        case QE_MR_PLSCC_TP:
                ifmr->ifm_active = IFM_ETHER | IFM_10_T;
                break;
        case QE_MR_PLSCC_AUI:
                ifmr->ifm_active = IFM_ETHER | IFM_10_5;
                break;
        case QE_MR_PLSCC_GPSI:
        case QE_MR_PLSCC_DAI:
                break;
        }

        v = bus_space_read_1(t, mr, QE_MRI_PHYCC);
        ifmr->ifm_status |= IFM_AVALID;
        if ((v & QE_MR_PHYCC_LNKFL) != 0)
                ifmr->ifm_status &= ~IFM_ACTIVE;
        else
                ifmr->ifm_status |= IFM_ACTIVE;
}
/*
 * Set media options.
 */
int
qe_ifmedia_upd(struct ifnet *ifp)
{
        struct qe_softc *sc = ifp->if_softc;
        struct ifmedia *ifm = &sc->sc_ifmedia;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t mr = sc->sc_mr;
        int newmedia = ifm->ifm_media;
        uint8_t plscc, phycc;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif
        if (IFM_TYPE(newmedia) != IFM_ETHER)
                return (EINVAL);

        plscc = bus_space_read_1(t, mr, QE_MRI_PLSCC) & ~QE_MR_PLSCC_PORTMASK;
        phycc = bus_space_read_1(t, mr, QE_MRI_PHYCC) & ~QE_MR_PHYCC_ASEL;

        if (IFM_SUBTYPE(newmedia) == IFM_AUTO)
                phycc |= QE_MR_PHYCC_ASEL;
        else if (IFM_SUBTYPE(newmedia) == IFM_10_T)
                plscc |= QE_MR_PLSCC_TP;
        else if (IFM_SUBTYPE(newmedia) == IFM_10_5)
                plscc |= QE_MR_PLSCC_AUI;

        bus_space_write_1(t, mr, QE_MRI_PLSCC, plscc);
        bus_space_write_1(t, mr, QE_MRI_PHYCC, phycc);

        return (0);
}