/* $Id: if_ae.c,v 1.16 2009/11/12 19:18:55 dyoung Exp $ */
/*
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 *
 * This code was written by Garrett D'Amore for the Champaign-Urbana
 * Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *      This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center; and by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Device driver for the onboard ethernet MAC found on the AR5312.
 *
 * This device is very similar to the tulip in most regards, and
 * the code is directly derived from NetBSD's tulip.c.  However, it
 * is different enough that it did not seem to be a good idea to
 * add further complexity to the tulip driver, so we have our own.
 *
 * Also tulip has a lot of complexity in it for various parts/options
 * that we don't need, and on these little boxes with only ~8MB RAM, we
 * don't want any extra bloat.
 */
/*
 * TODO:
 *
 * 1) Find out about BUS_MODE_ALIGN16B.  This chip can apparently align
 *    inbound packets on a half-word boundary, which would make life easier
 *    for TCP/IP.  (Aligning IP headers on a word.)
 *
 * 2) There is stuff in the original tulip driver to shut down the device
 *    when reacting to a change in link status.  Is that needed?
 *
 * 3) Test with a variety of 10/100 HDX/FDX scenarios.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ae.c,v 1.15 2008/11/07 00:20:02 dyoung Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <mips/atheros/include/arbusvar.h>
#include <mips/atheros/dev/aereg.h>
#include <mips/atheros/dev/aevar.h>
static const struct {
	u_int32_t	txth_opmode;	/* OPMODE bits */
	const char	*txth_name;	/* name of mode */
} ae_txthresh[] = {
	{ OPMODE_TR_32,		"32 words" },
	{ OPMODE_TR_64,		"64 words" },
	{ OPMODE_TR_128,	"128 words" },
	{ OPMODE_TR_256,	"256 words" },
	{ OPMODE_SF,		"store and forward mode" },
	{ 0,			NULL },
};
static int	ae_match(device_t, struct cfdata *, void *);
static void	ae_attach(device_t, device_t, void *);
static int	ae_detach(device_t, int);
static int	ae_activate(device_t, enum devact);

static int	ae_ifflags_cb(struct ethercom *);
static void	ae_reset(struct ae_softc *);
static void	ae_idle(struct ae_softc *, u_int32_t);

static void	ae_start(struct ifnet *);
static void	ae_watchdog(struct ifnet *);
static int	ae_ioctl(struct ifnet *, u_long, void *);
static int	ae_init(struct ifnet *);
static void	ae_stop(struct ifnet *, int);

static void	ae_shutdown(void *);

static void	ae_rxdrain(struct ae_softc *);
static int	ae_add_rxbuf(struct ae_softc *, int);

static int	ae_enable(struct ae_softc *);
static void	ae_disable(struct ae_softc *);
static void	ae_power(int, void *);

static void	ae_filter_setup(struct ae_softc *);

static int	ae_intr(void *);
static void	ae_rxintr(struct ae_softc *);
static void	ae_txintr(struct ae_softc *);

static void	ae_mii_tick(void *);
static void	ae_mii_statchg(device_t);

static int	ae_mii_readreg(device_t, int, int);
static void	ae_mii_writereg(device_t, int, int, int);

#ifdef AE_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

static void	ae_print_stats(struct ae_softc *);

CFATTACH_DECL(ae, sizeof(struct ae_softc),
    ae_match, ae_attach, ae_detach, ae_activate);
/*
 * ae_match:
 *
 *	Check for a device match.
 */
static int
ae_match(device_t parent, struct cfdata *cf, void *aux)
{
	struct arbus_attach_args *aa = aux;

	if (strcmp(aa->aa_name, cf->cf_name) == 0)
		return 1;

	return 0;
}
/*
 * ae_attach:
 *
 *	Attach an ae interface to the system.
 */
static void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);

	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    sc->sc_dev.dv_xname);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
}

/*
 * ae_activate:
 *
 *	Handle device activation/deactivation requests.
 */
static int
ae_activate(device_t self, enum devact act)
{
	struct ae_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ethercom.ec_if);
		break;
	}

	return 0;
}

/*
 * ae_detach:
 *
 *	Detach a device interface.
 */
static int
ae_detach(device_t self, int flags)
{
	struct ae_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct ae_txsoft *txs;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & AE_ATTACHED) == 0)
		return 0;

	/* Unhook our tick handler. */
	callout_stop(&sc->sc_tick_callout);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	rnd_detach_source(&sc->sc_rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
	}
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);

	return 0;
}

/*
 * ae_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
ae_shutdown(void *arg)
{
	struct ae_softc *sc = arg;

	ae_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * ae_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
ae_start(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ae_txsoft *txs, *last_txs = NULL;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx = -1, ofree, seg;

	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags, ifp->if_flags));

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
	    sc->sc_dev.dv_xname, ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	    sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (((mtod(m0, uintptr_t) & 3) != 0) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname,
				    error);
				m_freem(m);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		    seg < dmamap->dm_nsegs;
		    seg++, nexttx = AE_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 */
			sc->sc_txdescs[nexttx].ad_status =
			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
			sc->sc_txdescs[nexttx].ad_bufaddr1 =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].ad_ctl =
			    (dmamap->dm_segs[seg].ds_len <<
			    (nexttx == (AE_NTXDESC - 1) ?
			     ADCTL_ER : 0);
			lasttx = nexttx;
		}

		KASSERT(lasttx != -1);

		/* Set `first segment' and `last segment' appropriately. */
		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;

		if (ifp->if_flags & IFF_DEBUG) {
			printf(" txsoft %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr2);
				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		last_txs = txs;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (txs == NULL || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    sc->sc_dev.dv_xname, lasttx, firsttx));
		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_IC;
		AE_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].ad_status |= ADSTAT_OWN;
		AE_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Wake up the transmitter. */
		/* XXX USE AUTOPOLLING? */
		AE_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);

		/* Set a watchdog timer in case the chip flakes out. */
	}
}

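/*
 * Note on the hand-off above: every descriptor in the chain except the
 * first is marked ADSTAT_OWN as it is filled in; only after the whole
 * chain has been built and synced does ae_start set ADSTAT_OWN on the
 * first descriptor and poke CSR_TXPOLL, so the chip never sees a
 * partially constructed chain.
 */
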
/*
 * ae_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
ae_watchdog(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	int doing_transmit;

	doing_transmit = (!SIMPLEQ_EMPTY(&sc->sc_txdirtyq));

	if (doing_transmit) {
		printf("%s: transmit timeout\n", sc->sc_dev.dv_xname);
	} else
		printf("%s: spurious watchdog timeout\n", sc->sc_dev.dv_xname);

	/* Try to get more packets going. */
	ae_start(ifp);
}

/*
 * If the interface is up and running, only modify the receive
 * filter when changing to/from promiscuous mode.  Otherwise return
 * ENETRESET so that ether_ioctl will reset the chip.
 */
static int
ae_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ae_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ae_filter_setup(sc);

	return 0;
}

/*
 * ae_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ae_softc *sc = ifp->if_softc;
	int error;

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed.  Set the
			 * hardware filter accordingly.
			 */
			ae_filter_setup(sc);
		}
		error = 0;
	}

	/* Try to get more packets going. */
	if (AE_IS_ENABLED(sc))
		ae_start(ifp);

	sc->sc_if_flags = ifp->if_flags;
	return error;
}

/*
 * ae_intr:
 *
 *	Interrupt service routine.
 */
static int
ae_intr(void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t status, rxstatus, txstatus;
	int handled = 0, txthresh;

	DPRINTF(sc, ("%s: ae_intr\n", sc->sc_dev.dv_xname));

	if (AE_IS_ENABLED(sc) == 0)
		panic("%s: ae_intr: not enabled", sc->sc_dev.dv_xname);

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(&sc->sc_dev)) {
		printf("spurious?!?\n");
		return 0;
	}

	for (;;) {
		status = AE_READ(sc, CSR_STATUS);
		AE_WRITE(sc, CSR_STATUS, status);

		if ((status & sc->sc_inten) == 0)
			break;

		handled = 1;

		rxstatus = status & sc->sc_rxint_mask;
		txstatus = status & sc->sc_txint_mask;

		if (rxstatus) {
			/* Grab any new packets. */
			ae_rxintr(sc);

			if (rxstatus & STATUS_RU) {
				printf("%s: receive ring overrun\n",
				    sc->sc_dev.dv_xname);
				/* Get the receive process going again. */
				AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
				break;
			}
		}

		if (txstatus) {
			/* Sweep up transmit descriptors. */
			ae_txintr(sc);

			if (txstatus & STATUS_TJT)
				printf("%s: transmit jabber timeout\n",
				    sc->sc_dev.dv_xname);

			if (txstatus & STATUS_UNF) {
				/*
				 * Increase our transmit threshold if
				 * another is available.
				 */
				txthresh = sc->sc_txthresh + 1;
				if (ae_txthresh[txthresh].txth_name != NULL) {
					u_int32_t opmode;

					/* Idle the transmit process. */
					opmode = AE_READ(sc, CSR_OPMODE);
					ae_idle(sc, OPMODE_ST);

					sc->sc_txthresh = txthresh;
					opmode &= ~(OPMODE_TR|OPMODE_SF);
					opmode |=
					    ae_txthresh[txthresh].txth_opmode;
					printf("%s: transmit underrun; new "
					    "threshold: %s\n",
					    sc->sc_dev.dv_xname,
					    ae_txthresh[txthresh].txth_name);

					/*
					 * Set the new threshold and restart
					 * the transmit process.
					 */
					AE_WRITE(sc, CSR_OPMODE, opmode);
				}
				/*
				 * XXX Log every Nth underrun from
				 */
			}
		}

		if (status & (STATUS_TPS|STATUS_RPS)) {
			if (status & STATUS_TPS)
				printf("%s: transmit process stopped\n",
				    sc->sc_dev.dv_xname);
			if (status & STATUS_RPS)
				printf("%s: receive process stopped\n",
				    sc->sc_dev.dv_xname);
		}

		if (status & STATUS_SE) {
			const char *str;

			if (status & STATUS_TX_ABORT)
				str = "transmit abort";
			else if (status & STATUS_RX_ABORT)
				str = "receive abort";
			else
				str = "unknown error";

			printf("%s: fatal system error: %s\n",
			    sc->sc_dev.dv_xname, str);
		}

		/*
		 * Transmit buffer unavailable -- normal
		 * condition, nothing to do, really.
		 *
		 * General purpose timer expired -- we don't
		 * use the general purpose timer.
		 *
		 * Early receive interrupt -- not available on
		 * all chips, we just use RI.  We also only
		 * use single-segment receive DMA, so this
		 * is mostly useless.
		 */
	}

	/* Try to get more packets going. */
	ae_start(ifp);

	rnd_add_uint32(&sc->sc_rnd_source, status);

	return handled;
}

/*
 * ae_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
ae_rxintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct ae_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr;; i = AE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		AE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].ad_status;

		if (rxstat & ADSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (rxstat & ADSTAT_Rx_CS)
			ifp->if_collisions++;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 * If 802.1Q VLAN MTU is enabled, ignore the Frame Too Long
		 * error.
		 */
		if (rxstat & ADSTAT_ES &&
		    ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) == 0 ||
		    (rxstat & (ADSTAT_Rx_DE | ADSTAT_Rx_RF |
		    ADSTAT_Rx_DB | ADSTAT_Rx_CE)) != 0)) {
#define PRINTERR(bit, str) \
	if (rxstat & (bit)) \
		printf("%s: receive error: %s\n", \
		    sc->sc_dev.dv_xname, str)
			PRINTERR(ADSTAT_Rx_DE, "descriptor error");
			PRINTERR(ADSTAT_Rx_RF, "runt frame");
			PRINTERR(ADSTAT_Rx_TL, "frame too long");
			PRINTERR(ADSTAT_Rx_RE, "MII error");
			PRINTERR(ADSTAT_Rx_DB, "dribbling bit");
			PRINTERR(ADSTAT_Rx_CE, "CRC error");
#undef PRINTERR
			AE_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note the chip
		 * includes the CRC with every packet.
		 */
		len = ADSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN;

		/*
		 * XXX: the Atheros part can align on half words.  what
		 * is the performance implication of this?  Probably
		 * minimal, and we should use it...
		 */
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (ae_add_rxbuf(sc, i) != 0) {
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The chip's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				AE_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap,
				    0, rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		AE_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

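/*
 * The two receive paths above trade copies for alignment: on
 * __NO_STRICT_ALIGNMENT machines the filled cluster is handed straight
 * up and a fresh one is attached to the ring, while on strict-alignment
 * MIPS the frame is copied into a new mbuf offset by 2 bytes so the IP
 * header lands on a 32-bit boundary and the original cluster stays on
 * the ring.
 */
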
/*
 * ae_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
ae_txintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_txsoft *txs;
	u_int32_t txstat;

	DPRINTF(sc, ("%s: ae_txintr: sc_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags));

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		AE_CDTXSYNC(sc, txs->txs_lastdesc, txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		if (ifp->if_flags & IFF_DEBUG) {
			int i;

			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = AE_NEXTTX(i)) {
				printf(" descriptor %d:\n", i);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[i].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[i].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr2);
				if (i == txs->txs_lastdesc)
					break;
			}
		}

		txstat = sc->sc_txdescs[txs->txs_lastdesc].ad_status;
		if (txstat & ADSTAT_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.
		 */
		if (txstat & ADSTAT_Tx_UF)
			sc->sc_stats.ts_tx_uf++;
		if (txstat & ADSTAT_Tx_TO)
			sc->sc_stats.ts_tx_to++;
		if (txstat & ADSTAT_Tx_EC)
			sc->sc_stats.ts_tx_ec++;
		if (txstat & ADSTAT_Tx_LC)
			sc->sc_stats.ts_tx_lc++;

		if (txstat & (ADSTAT_Tx_UF|ADSTAT_Tx_TO))
			ifp->if_oerrors++;

		if (txstat & ADSTAT_Tx_EC)
			ifp->if_collisions += 16;
		else
			ifp->if_collisions += ADSTAT_Tx_COLLISIONS(txstat);
		if (txstat & ADSTAT_Tx_LC)
			ifp->if_collisions++;
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
}

static void
ae_print_stats(struct ae_softc *sc)
{

	printf("%s: tx_uf %lu, tx_to %lu, tx_ec %lu, tx_lc %lu\n",
	    sc->sc_dev.dv_xname,
	    sc->sc_stats.ts_tx_uf, sc->sc_stats.ts_tx_to,
	    sc->sc_stats.ts_tx_ec, sc->sc_stats.ts_tx_lc);
}

/*
 * ae_reset:
 *
 *	Perform a soft reset on the chip.
 */
static void
ae_reset(struct ae_softc *sc)
{
	int i;

	AE_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);

	/*
	 * The chip doesn't take itself out of reset automatically.
	 * We need to do so after 2us.
	 */
	AE_WRITE(sc, CSR_BUSMODE, 0);

	for (i = 0; i < 1000; i++) {
		/*
		 * Wait a bit for the reset to complete before peeking
		 * at the chip again.
		 */
		if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
			break;
	}

	if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
}

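/*
 * The reset sequence above is two-step: BUSMODE_SWR is written to start
 * the soft reset, the bit is then cleared explicitly, and the register is
 * polled (up to 1000 times) until the SWR bit reads back as zero; only if
 * it never clears is the failure reported.
 */
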
/*
 * ae_init:		[ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
ae_init(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;
	struct ae_rxsoft *rxs;
	const uint8_t *enaddr;
	int i, error = 0;

	if ((error = ae_enable(sc)) != 0)
		goto out;

	/*
	 * Cancel any pending I/O.
	 */
	ae_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * Initialize the BUSMODE register.
	 */
	AE_WRITE(sc, CSR_BUSMODE,
	    /* XXX: not sure if this is a good thing or not... */
	    //BUSMODE_ALIGN_16B |
	    BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < AE_NTXDESC; i++) {
		sc->sc_txdescs[i].ad_ctl = 0;
		sc->sc_txdescs[i].ad_bufaddr2 =
		    AE_CDTXADDR(sc, AE_NEXTTX(i));
	}
	sc->sc_txdescs[AE_NTXDESC - 1].ad_ctl |= ADCTL_ER;
	AE_CDTXSYNC(sc, 0, AE_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = AE_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the transmit job descriptors.
	 */
	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = ae_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				ae_rxdrain(sc);
				goto out;
			}
		} else
			AE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	/* normal interrupts */
	sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS;

	/* abnormal interrupts */
	sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF |
	    STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS;

	sc->sc_rxint_mask = STATUS_RI|STATUS_RU;
	sc->sc_txint_mask = STATUS_TI|STATUS_UNF|STATUS_TJT;

	sc->sc_rxint_mask &= sc->sc_inten;
	sc->sc_txint_mask &= sc->sc_inten;

	AE_WRITE(sc, CSR_INTEN, sc->sc_inten);
	AE_WRITE(sc, CSR_STATUS, 0xffffffff);

	/*
	 * Give the transmit and receive rings to the chip.
	 */
	AE_WRITE(sc, CSR_TXLIST, AE_CDTXADDR(sc, sc->sc_txnext));
	AE_WRITE(sc, CSR_RXLIST, AE_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the station address.
	 */
	enaddr = CLLADDR(ifp->if_sadl);
	AE_WRITE(sc, CSR_MACHI, enaddr[5] << 16 | enaddr[4]);
	AE_WRITE(sc, CSR_MACLO, enaddr[3] << 24 | enaddr[2] << 16 |
	    enaddr[1] << 8 | enaddr[0]);

	/*
	 * Set the receive filter.  This will start the transmit and
	 * receive processes.
	 */
	ae_filter_setup(sc);

	/*
	 * Set the current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	AE_SET(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE);

	/*
	 * Write out the opmode.
	 */
	AE_WRITE(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST |
	    ae_txthresh[sc->sc_txthresh].txth_opmode);

	/*
	 * Start the receive process.
	 */
	AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);

	if (sc->sc_tick != NULL) {
		/* Start the one second clock. */
		callout_reset(&sc->sc_tick_callout, hz >> 3, sc->sc_tick, sc);
	}

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_if_flags = ifp->if_flags;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return error;
}

static int
ae_enable(struct ae_softc *sc)
{

	if (AE_IS_ENABLED(sc) == 0) {
		sc->sc_ih = arbus_intr_establish(sc->sc_cirq, sc->sc_mirq,
		    ae_intr, sc);
		if (sc->sc_ih == NULL) {
			printf("%s: unable to establish interrupt\n",
			    sc->sc_dev.dv_xname);
			return EIO;
		}
		sc->sc_flags |= AE_ENABLED;
	}
	return 0;
}

static void
ae_disable(struct ae_softc *sc)
{

	if (AE_IS_ENABLED(sc)) {
		arbus_intr_disestablish(sc->sc_ih);
		sc->sc_flags &= ~AE_ENABLED;
	}
}

/*
 * ae_power:
 *
 *	Power management (suspend/resume) hook.
 */
static void
ae_power(int why, void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	printf("power called: %d, %x\n", why, (uint32_t)arg);

	switch (why) {
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			ae_init(ifp);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
}

/*
 * ae_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
ae_rxdrain(struct ae_softc *sc)
{
	struct ae_rxsoft *rxs;
	int i;

	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * ae_stop:		[ ifnet interface function ]
 *
 *	Stop transmission on the interface.
 */
static void
ae_stop(struct ifnet *ifp, int disable)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;

	if (sc->sc_tick != NULL) {
		/* Stop the one second clock. */
		callout_stop(&sc->sc_tick_callout);
	}

	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	AE_WRITE(sc, CSR_INTEN, 0);

	/* Stop the transmit and receive processes. */
	AE_WRITE(sc, CSR_OPMODE, 0);
	AE_WRITE(sc, CSR_RXLIST, 0);
	AE_WRITE(sc, CSR_TXLIST, 0);
	AE_CLR(sc, CSR_MACCTL, MACCTL_TE | MACCTL_RE);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;

	if (disable) {
		ae_rxdrain(sc);
		ae_disable(sc);
	}

	/*
	 * Reset the chip (needed on some flavors to actually disable it).
	 */
	ae_reset(sc);
}

/*
 * ae_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
ae_add_rxbuf(struct ae_softc *sc, int idx)
{
	struct ae_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("ae_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	AE_INIT_RXDESC(sc, idx);

	return 0;
}

/*
 * ae_filter_setup:
 *
 *	Set the chip's receive filter.
 */
static void
ae_filter_setup(struct ae_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hash, mchash[2];
	uint32_t macctl = 0;

	/*
	 * If the chip is running, we need to reset the interface,
	 * and will revisit here (with IFF_RUNNING) clear.  The
	 * chip seems to really not like to have its multicast
	 * filter programmed without a reset.
	 */
	if (ifp->if_flags & IFF_RUNNING) {
		(void) ae_init(ifp);
		return;
	}

	DPRINTF(sc, ("%s: ae_filter_setup: sc_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags));

	macctl = AE_READ(sc, CSR_MACCTL);
	macctl &= ~(MACCTL_PR | MACCTL_PM);
	macctl |= MACCTL_HASH;
	macctl |= MACCTL_HBD;
	macctl |= MACCTL_PR;

	if (ifp->if_flags & IFF_PROMISC) {
		macctl |= MACCTL_PR;
		goto allmulti;
	}

	mchash[0] = mchash[1] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		/* Verify whether we use big or little endian hashes */
		hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f;
		mchash[hash >> 5] |= 1 << (hash & 0x1f);
		ETHER_NEXT_MULTI(step, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = 0xffffffff;
	macctl |= MACCTL_PM;

 setit:
	AE_WRITE(sc, CSR_HTHI, mchash[0]);
	AE_WRITE(sc, CSR_HTHI, mchash[1]);

	AE_WRITE(sc, CSR_MACCTL, macctl);

	DPRINTF(sc, ("%s: ae_filter_setup: returning %x\n",
	    sc->sc_dev.dv_xname, macctl));
}

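/*
 * The hash filter above takes the low six bits of the big-endian CRC of
 * each multicast address as an index into a 64-bit table: bit 5 of that
 * index selects which 32-bit word of mchash[] to use and the low five
 * bits select the bit within the word.
 */
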
/*
 * ae_idle:
 *
 *	Cause the transmit and/or receive processes to go idle.
 */
static void
ae_idle(struct ae_softc *sc, u_int32_t bits)
{
	static const char * const txstate_names[] = {
		"STOPPED",
		"RUNNING - FETCH",
		"RUNNING - WAIT",
		"RUNNING - READING",
		"-- RESERVED --",
		"RUNNING - SETUP",
		"SUSPENDED",
		"RUNNING - CLOSE",
	};
	static const char * const rxstate_names[] = {
		"STOPPED",
		"RUNNING - FETCH",
		"RUNNING - CHECK",
		"RUNNING - WAIT",
		"SUSPENDED",
		"RUNNING - CLOSE",
		"RUNNING - FLUSH",
		"RUNNING - QUEUE",
	};

	u_int32_t csr, ackmask = 0;
	int i;

	if (bits & OPMODE_ST)
		ackmask |= STATUS_TPS;

	if (bits & OPMODE_SR)
		ackmask |= STATUS_RPS;

	AE_CLR(sc, CSR_OPMODE, bits);

	for (i = 0; i < 1000; i++) {
		if (AE_ISSET(sc, CSR_STATUS, ackmask) == ackmask)
			break;
	}

	csr = AE_READ(sc, CSR_STATUS);
	if ((csr & ackmask) != ackmask) {
		if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 &&
		    (csr & STATUS_TS) != STATUS_TS_STOPPED) {
			printf("%s: transmit process failed to idle: "
			    "state %s\n", sc->sc_dev.dv_xname,
			    txstate_names[(csr & STATUS_TS) >> 20]);
		}
		if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 &&
		    (csr & STATUS_RS) != STATUS_RS_STOPPED) {
			printf("%s: receive process failed to idle: "
			    "state %s\n", sc->sc_dev.dv_xname,
			    rxstate_names[(csr & STATUS_RS) >> 17]);
		}
	}
}

/*****************************************************************************
 * Support functions for MII-attached media.
 *****************************************************************************/

/*
 * ae_mii_tick:
 *
 *	One second timer, used to tick the MII.
 */
static void
ae_mii_tick(void *arg)
{
	struct ae_softc *sc = arg;
	int s;

	if (!device_is_active(&sc->sc_dev))
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc);
}

/*
 * ae_mii_statchg:	[mii interface function]
 *
 *	Callback from PHY when media changes.
 */
static void
ae_mii_statchg(device_t self)
{
	struct ae_softc *sc = device_private(self);
	uint32_t macctl, flowc;

	//opmode = AE_READ(sc, CSR_OPMODE);
	macctl = AE_READ(sc, CSR_MACCTL);

	/* XXX: do we need to do this? */
	/* Idle the transmit and receive processes. */
	//ae_idle(sc, OPMODE_ST|OPMODE_SR);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		macctl &= ~MACCTL_DRO;
		macctl |= MACCTL_FDX;
	} else {
		flowc = 0;	/* cannot do flow control in HDX */
		macctl |= MACCTL_DRO;
		macctl &= ~MACCTL_FDX;
	}

	AE_WRITE(sc, CSR_FLOWC, flowc);
	AE_WRITE(sc, CSR_MACCTL, macctl);

	/* restore operational mode */
	//AE_WRITE(sc, CSR_OPMODE, opmode);
}

/*
 * ae_mii_readreg:
 *
 *	Read a PHY register.
 */
static int
ae_mii_readreg(device_t self, int phy, int reg)
{
	struct ae_softc *sc = device_private(self);
	uint32_t addr;
	int i;

	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT);
	AE_WRITE(sc, CSR_MIIADDR, addr);

	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}

	return (AE_READ(sc, CSR_MIIDATA) & 0xffff);
}

/*
 * ae_mii_writereg:
 *
 *	Write a PHY register.
 */
static void
ae_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct ae_softc *sc = device_private(self);
	uint32_t addr;
	int i;

	/* write the data register */
	AE_WRITE(sc, CSR_MIIDATA, val);

	/* write the address to latch it in */
	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) |
	    MIIADDR_WRITE;
	AE_WRITE(sc, CSR_MIIADDR, addr);

	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}
}