/*	$NetBSD: smc83c170.c,v 1.76 2008/07/06 14:32:56 tsutsui Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Standard Microsystems Corp. 83C170
 * Ethernet PCI Integrated Controller (EPIC/100).
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: smc83c170.c,v 1.76 2008/07/06 14:32:56 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>

#include <dev/mii/miivar.h>
#include <dev/mii/lxtphyreg.h>

#include <dev/ic/smc83c170reg.h>
#include <dev/ic/smc83c170var.h>

void	epic_start(struct ifnet *);
void	epic_watchdog(struct ifnet *);
int	epic_ioctl(struct ifnet *, u_long, void *);
int	epic_init(struct ifnet *);
void	epic_stop(struct ifnet *, int);

bool	epic_shutdown(device_t, int);

void	epic_reset(struct epic_softc *);
void	epic_rxdrain(struct epic_softc *);
int	epic_add_rxbuf(struct epic_softc *, int);
void	epic_read_eeprom(struct epic_softc *, int, int, uint16_t *);
void	epic_set_mchash(struct epic_softc *);
void	epic_fixup_clock_source(struct epic_softc *);
int	epic_mii_read(device_t, int, int);
void	epic_mii_write(device_t, int, int, int);
int	epic_mii_wait(struct epic_softc *, uint32_t);
void	epic_tick(void *);

void	epic_statchg(device_t);
int	epic_mediachange(struct ifnet *);
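
/*
 * Interrupt sources the driver services; this mask is written to the
 * EPIC_INTMASK register when the interface is initialized.
 */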
#define	INTMASK	(INTSTAT_FATAL_INT | INTSTAT_TXU | \
	    INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
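
/*
 * When non-zero, received packets that fit in a single header mbuf are
 * copied out of the receive cluster instead of being passed up directly;
 * see the receive path in epic_intr().
 */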
int	epic_copy_small = 0;
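
/*
 * Minimum Ethernet frame length minus the CRC; short transmit packets
 * are padded out to this length using the zero-filled pad buffer.
 */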
#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * Attach an EPIC interface to the system.
 */
void
epic_attach(struct epic_softc *sc)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int rseg, error, miiflags, i;
	bus_dma_segment_t seg;
	uint8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	uint16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
	char *nullbuf;

	callout_init(&sc->sc_mii_callout, 0);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for the control data.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		return;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_0;
	}
	nullbuf =
	    (char *)sc->sc_control_data + sizeof(struct epic_control_data);
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_1;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_3;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create pad buffer DMA map, error = %d\n", error);
		goto fail_4;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load pad buffer DMA map, error = %d\n", error);
		goto fail_5;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);
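
	/*
	 * The zero-filled pad area is referenced by the last fragment of
	 * any short transmit packet (see epic_start()), so it is left
	 * loaded for the lifetime of the interface.
	 */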

	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	epic_read_eeprom(sc, 0, __arraycount(myea), myea);
	for (i = 0; i < __arraycount(myea); i++) {
		enaddr[i * 2]     = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name.
	 */
	epic_read_eeprom(sc, 0x2c, __arraycount(mydevname), mydevname);
	for (i = 0; i < __arraycount(mydevname); i++) {
		devname[i * 2]     = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	devname[sizeof(mydevname)] = '\0';
	for (i = sizeof(mydevname); i > 0; i--) {
		if (devname[i - 1] == ' ')
			devname[i - 1] = '\0';
		else
			break;
	}

	aprint_normal_dev(sc->sc_dev, "%s, Ethernet address %s\n",
	    devname, ether_sprintf(enaddr));

	miiflags = 0;
	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
		miiflags |= MIIF_HAVEFIBER;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, miiflags);
	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (sc->sc_hwflags & EPIC_HAS_BNC) {
		/* use the next free media instance */
		sc->sc_serinst = sc->sc_mii.mii_instance++;
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->sc_serinst),
		    0, NULL);
		aprint_normal_dev(sc->sc_dev, "10base2/BNC\n");
	} else
		sc->sc_serinst = -1;

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	ifp->if_init = epic_init;
	ifp->if_stop = epic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(sc->sc_dev, NULL, NULL, epic_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_4:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_3:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_1:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_0:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
}

/*
 * Shutdown hook.  Make sure the interface is stopped at reboot.
 */
bool
epic_shutdown(device_t self, int howto)
{
	struct epic_softc *sc = device_private(self);

	epic_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 */
void
epic_start(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;
	u_int len;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we will use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == EPIC_NFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				m_freem(m);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the fraglist. */
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}
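
		/*
		 * Short packets are padded out to the minimum frame length
		 * by appending a fragment that points at the shared
		 * zero-filled pad buffer.
		 */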
		len = m0->m_pkthdr.len;
		if (len < ETHER_PAD_LEN) {
			fr->ef_frags[seg].ef_addr = sc->sc_nulldma;
			fr->ef_frags[seg].ef_length = ETHER_PAD_LEN - len;
			len = ETHER_PAD_LEN;
			seg++;
		}
		fr->ef_nfrags = seg;

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = TXSTAT_TXLENGTH(len);
		else
			txd->et_txstatus =
			    TXSTAT_TXLENGTH(len) | ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus |= ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Watchdog timer handler.
 * [ifnet interface function]
 */
void
epic_watchdog(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;

	(void)epic_init(ifp);
}

/*
 * Handle control requests from the operator.
 * [ifnet interface function]
 */
int
epic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct epic_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.  Update our idea of the current media;
		 * epic_set_mchash() needs to know what it is.
		 */
		if (ifp->if_flags & IFF_RUNNING) {
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
		}
		error = 0;
	}

	splx(s);
	return error;
}

/*
 * Interrupt handler.
 */
int
epic_intr(void *arg)
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	uint32_t intstat, rxstatus, txstatus;
	int i, claimed = 0;
	u_int len;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return claimed;

	claimed = 1;

	/*
	 * Acknowledge the interrupt.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			rxstatus = rxd->er_rxstatus;
			if (rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    device_xname(sc->sc_dev));
				if (rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    device_xname(sc->sc_dev));
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it off here.
			 */
			len = RXSTAT_RXLENGTH(rxstatus) - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, void *),
				    mtod(ds->ds_mbuf, void *), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;

			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if it's for us.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);

			ifp->if_ipackets++;
			(*ifp->if_input)(ifp, m);
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    device_xname(sc->sc_dev));
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		    i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			txstatus = txd->et_txstatus;
			if (txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txstatus);
			if (txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    device_xname(sc->sc_dev));
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n",
			    device_xname(sc->sc_dev));
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    device_xname(sc->sc_dev));
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    device_xname(sc->sc_dev));
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    device_xname(sc->sc_dev));
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    device_xname(sc->sc_dev));
		else
			printf("%s: unknown fatal error\n",
			    device_xname(sc->sc_dev));
		(void)epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}

/*
 * One second timer, used to tick the MII.
 */
void
epic_tick(void *arg)
{
	struct epic_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
}

/*
 * Fixup the clock source on the EPIC.
 */
void
epic_fixup_clock_source(struct epic_softc *sc)
{
	int i;

	/*
	 * According to SMC Application Note 7-15, the EPIC's clock
	 * source is incorrect following a reset.  This manifests itself
	 * as failure to recognize when host software has written to
	 * a register on the EPIC.  The appnote recommends issuing at
	 * least 16 consecutive writes to the CLOCK TEST bit to correctly
	 * configure the clock source.
	 */
	for (i = 0; i < 16; i++)
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
		    TEST_CLOCKTEST);
}

/*
 * Perform a soft reset on the EPIC.
 */
void
epic_reset(struct epic_softc *sc)
{

	epic_fixup_clock_source(sc);

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
	delay(100);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
	delay(100);

	epic_fixup_clock_source(sc);
}

/*
 * Initialize the interface.  Must be called at splnet().
 */
int
epic_init(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	uint32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size selection bits.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	if ((error = epic_mediachange(ifp)) != 0)
		goto out;

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		} else
			EPIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return error;
}

/*
 * Drain the receive queue.
 */
void
epic_rxdrain(struct epic_softc *sc)
{
	struct epic_descsoft *ds;
	int i;

	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop transmission on the interface.
 */
void
epic_stop(struct ifnet *ifp, int disable)
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_descsoft *ds;
	uint32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_mii_callout);

	mii_down(&sc->sc_mii);

	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		epic_rxdrain(sc);
}

/*
 * Read the EPIC Serial EEPROM.
 */
void
epic_read_eeprom(struct epic_softc *sc, int word, int wordcnt, uint16_t *data)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint16_t reg;
	int i, x;

#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in address. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out data. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
epic_add_rxbuf(struct epic_softc *sc, int idx)
{
	struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("%s", __func__);	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EPIC_INIT_RXDESC(sc, idx);

	return 0;
}

/*
 * Set the EPIC multicast hash table.
 *
 * NOTE: We rely on a recently-updated mii_media_active here!
 */
void
epic_set_mchash(struct epic_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hash, mchash[4];

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bits select the register, while the
	 * rest of the bits select the bit within the register.
	 */

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
		/* XXX hardware bug in 10Mbps mode. */
		goto allmulti;
	}

	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		hash >>= 26;

		/* Set the corresponding bit in the hash table. */
		mchash[hash >> 4] |= 1 << (hash & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto sethash;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;

 sethash:
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
}

/*
 * Wait for the MII to become ready.
 */
int
epic_mii_wait(struct epic_softc *sc, uint32_t rw)
{
	int i;

	for (i = 0; i < 50; i++) {
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
		    == 0)
			break;
		delay(2);
	}
	if (i == 50) {
		printf("%s: MII timed out\n", device_xname(sc->sc_dev));
		return 1;
	}

	return 0;
}

/*
 * Read from the MII.
 */
int
epic_mii_read(device_t self, int phy, int reg)
{
	struct epic_softc *sc = device_private(self);

	if (epic_mii_wait(sc, MMCTL_WRITE))
		return 0;

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_READ));

	if (epic_mii_wait(sc, MMCTL_READ))
		return 0;

	return bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
	    0xffff;
}

/*
 * Write to the MII.
 */
void
epic_mii_write(device_t self, int phy, int reg, int val)
{
	struct epic_softc *sc = device_private(self);

	if (epic_mii_wait(sc, MMCTL_WRITE))
		return;

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_WRITE));
}

/*
 * Callback from PHY when media changes.
 */
void
epic_statchg(device_t self)
{
	struct epic_softc *sc = device_private(self);
	uint32_t txcon, miicfg;

	/*
	 * Update loopback bits in TXCON to reflect duplex mode.
	 */
	txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	else
		txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);

	/* On some cards we need to manually set the full-duplex LED. */
	if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
			miicfg |= MIICFG_ENABLE;
		else
			miicfg &= ~MIICFG_ENABLE;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	/*
	 * There is a multicast filter bug in 10Mbps mode.  Kick the
	 * multicast filter in case the speed changed.
	 */
	epic_set_mchash(sc);
}

/*
 * Callback from ifmedia to request new media setting.
 *
 * XXX Looks to me like some of this complexity should move into
 * XXX one or two custom PHY drivers. --dyoung
 */
int
epic_mediachange(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia *ifm = &mii->mii_media;
	int media = ifm->ifm_cur->ifm_media;
	uint32_t miicfg;
	struct mii_softc *miisc;
	int rc, cfg;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	if (IFM_INST(media) != sc->sc_serinst) {
		/* If we're not selecting serial interface, select MII mode */
#ifdef EPICMEDIADEBUG
		printf("%s: parallel mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg &= ~MIICFG_SERMODEENA;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	if ((rc = mii_mediachg(mii)) == ENXIO)
		rc = 0;

	if (IFM_INST(media) == sc->sc_serinst) {
		/* select serial interface */
#ifdef EPICMEDIADEBUG
		printf("%s: serial mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);

		/* There is no driver to fill this */
		mii->mii_media_active = media;
		mii->mii_media_status = 0;

		epic_statchg(sc->sc_dev);
		return 0;
	}

	/* Lookup selected PHY */
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
		if (IFM_INST(media) == miisc->mii_inst)
			break;
	}
	if (miisc == NULL) {
		printf("%s: can't happen\n", __func__); /* ??? panic */
		return 0;
	}
#ifdef EPICMEDIADEBUG
	printf("%s: using phy %s\n", ifp->if_xname,
	    device_xname(miisc->mii_dev));
#endif

	if (miisc->mii_flags & MIIF_HAVEFIBER) {
		/* XXX XXX assume it's a Level1 - should check */

		/* We have to powerup fiber transceivers */
		cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
#ifdef EPICMEDIADEBUG
			printf("%s: power up fiber\n", ifp->if_xname);
#endif
			cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
		} else {
#ifdef EPICMEDIADEBUG
			printf("%s: power down fiber\n", ifp->if_xname);
#endif
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		}
		PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);