/*	$NetBSD: aic6915.c,v 1.24 2009/05/12 14:25:17 cegger Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.24 2009/05/12 14:25:17 cegger Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

static void	sf_start(struct ifnet *);
static void	sf_watchdog(struct ifnet *);
static int	sf_ioctl(struct ifnet *, u_long, void *);
static int	sf_init(struct ifnet *);
static void	sf_stop(struct ifnet *, int);

static bool	sf_shutdown(device_t, int);

static void	sf_txintr(struct sf_softc *);
static void	sf_rxintr(struct sf_softc *);
static void	sf_stats_update(struct sf_softc *);

static void	sf_reset(struct sf_softc *);
static void	sf_macreset(struct sf_softc *);
static void	sf_rxdrain(struct sf_softc *);
static int	sf_add_rxbuf(struct sf_softc *, int);
static uint8_t	sf_read_eeprom(struct sf_softc *, int);
static void	sf_set_filter(struct sf_softc *);

static int	sf_mii_read(device_t, int, int);
static void	sf_mii_write(device_t, int, int, int);
static void	sf_mii_statchg(device_t);

static void	sf_tick(void *);

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))
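
/*
 * The chip's registers can be mapped in either I/O or memory space.
 * When I/O mapped (sc_iomapped), the register file is not directly
 * addressable; each access instead goes through an indirect pair of
 * ports: the target register offset is written to SF_IndirectIoAccess
 * and the data is transferred through SF_IndirectIoDataPort.
 */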

static inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

static inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))
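
/*
 * sf_funcreg_* access the "functional" registers through the handle
 * carved out in sf_attach(); the sf_genreg_* wrappers reach the general
 * register space at SF_GENREG_OFFSET via sf_reg_read()/sf_reg_write(),
 * so that indirect access works when the chip is I/O mapped.
 */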

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout, 0);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to sub-region "
			    "functional registers, error = %d\n", error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to create control data "
		    "DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to create tx "
			    "DMA map %d, error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to create rx "
			    "DMA map %d, error = %d\n", i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
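	/* The address is stored in reverse order, at EEPROM offsets 15-20. */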
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", device_xname(&sc->sc_dev),
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n",
		    device_xname(&sc->sc_dev));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(&sc->sc_dev, NULL, NULL, sf_shutdown))
		pmf_class_network_register(&sc->sc_dev, ifp);
	else
		aprint_error_dev(&sc->sc_dev,
		    "couldn't establish power handler\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
static bool
sf_shutdown(device_t self, int howto)
{
	struct sf_softc *sc;

	sc = device_private(self);
	sf_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(&sc->sc_dev,
				    "unable to allocate Tx mbuf\n");
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(&sc->sc_dev,
					    "unable to allocate Tx cluster\n");
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(&sc->sc_dev,
				    "unable to load Tx buffer, error = %d\n",
				    error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= TD_W0_END;
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(&sc->sc_dev));
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
sf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct sf_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			sf_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				aprint_error_dev(&sc->sc_dev,
				    "WARNING: DMA error\n");
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    device_xname(&sc->sc_dev),
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}

/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
static void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			aprint_error_dev(&sc->sc_dev,
			    "Tx queue mismatch, index %d\n", txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	     CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use a cluster for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	     CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
static void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;
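
	/*
	 * The chip's statistics counters are laid out contiguously at
	 * SF_STATS_BASE, one 32-bit word per field of struct sf_stats
	 * and in the same order; read each counter and clear it.
	 */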
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
static void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		     PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		aprint_error_dev(&sc->sc_dev, "reset failed to complete\n");
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
static void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(&sc->sc_dev,
				    "unable to allocate or map rx buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n",
		    device_xname(&sc->sc_dev));
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sf_rxdrain(sc);
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
static uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;
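
	/*
	 * The EEPROM is presented in the general register space one
	 * 32-bit word at a time; fetch the word containing the offset
	 * and extract the addressed byte from it.
	 */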
	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}
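
/*
 * sf_set_filter_perfect:
 *
 *	Write an Ethernet address into a slot of the perfect filter
 *	table.  Each slot is 0x10 bytes of register space; the address
 *	is stored as three 16-bit words, last byte first.
 */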
static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}
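
/*
 * sf_set_filter_hash:
 *
 *	Set a bit in the multicast hash filter for the given address.
 *	The upper 9 bits of the big-endian CRC of the address select
 *	the bit: bits 8-4 pick one of the hash table words (spaced
 *	0x10 apart in register space), bits 3-0 the bit within it.
 */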
static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
static void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
static int
sf_mii_read(device_t self, int phy, int reg)
{
	struct sf_softc *sc = (void *) self;
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
static void
sf_mii_write(device_t self, int phy, int reg, int val)
{
	struct sf_softc *sc = (void *) self;
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		     MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", device_xname(&sc->sc_dev));
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
static void
sf_mii_statchg(device_t self)
{
	struct sf_softc *sc = (void *) self;
	uint32_t ipg;
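
	/*
	 * Update the duplex bit in MacConfig1 and pick the value
	 * written to SF_BkToBkIPG below for the new duplex setting.
	 */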
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}