1 /* $NetBSD: if_admsw.c,v 1.4 2008/02/07 01:21:52 dyoung Exp $ */
4 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
7 * Redistribution and use in source and binary forms, with or
8 * without modification, are permitted provided that the following
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following
14 * disclaimer in the documentation and/or other materials provided
15 * with the distribution.
16 * 3. The names of the authors may not be used to endorse or promote
17 * products derived from this software without specific prior
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
27 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
34 * Copyright (c) 2001 Wasabi Systems, Inc.
35 * All rights reserved.
37 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed for the NetBSD Project by
50 * Wasabi Systems, Inc.
51 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
52 * or promote products derived from this software without specific prior
55 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
65 * POSSIBILITY OF SUCH DAMAGE.
69 * Device driver for Alchemy Semiconductor Au1x00 Ethernet Media
74 * Better Rx buffer management; we want to get new Rx buffers
75 * to the chip more quickly than we currently do.
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_admsw.c,v 1.4 2008/02/07 01:21:52 dyoung Exp $");
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/callout.h>
87 #include <sys/malloc.h>
88 #include <sys/kernel.h>
89 #include <sys/socket.h>
90 #include <sys/ioctl.h>
91 #include <sys/errno.h>
92 #include <sys/device.h>
93 #include <sys/queue.h>
95 #include <prop/proplib.h>
97 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
100 #include <net/if_dl.h>
101 #include <net/if_media.h>
102 #include <net/if_ether.h>
108 #include <machine/bus.h>
109 #include <machine/intr.h>
110 #include <machine/endian.h>
112 #include <dev/mii/mii.h>
113 #include <dev/mii/miivar.h>
115 #include <sys/gpio.h>
116 #include <dev/gpio/gpiovar.h>
118 #include <mips/adm5120/include/adm5120reg.h>
119 #include <mips/adm5120/include/adm5120var.h>
120 #include <mips/adm5120/include/adm5120_obiovar.h>
121 #include <mips/adm5120/dev/if_admswreg.h>
122 #include <mips/adm5120/dev/if_admswvar.h>
124 static uint8_t vlan_matrix
[SW_DEVS
] = {
125 (1 << 6) | (1 << 0), /* CPU + port0 */
126 (1 << 6) | (1 << 1), /* CPU + port1 */
127 (1 << 6) | (1 << 2), /* CPU + port2 */
128 (1 << 6) | (1 << 3), /* CPU + port3 */
129 (1 << 6) | (1 << 4), /* CPU + port4 */
130 (1 << 6) | (1 << 5), /* CPU + port5 */
/* Event-counter bump; compiles away when counters are disabled. */
#ifdef ADMSW_EVENT_COUNTERS
#define	ADMSW_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	ADMSW_EVCNT_INCR(ev)	/* nothing */
#endif
139 static void admsw_start(struct ifnet
*);
140 static void admsw_watchdog(struct ifnet
*);
141 static int admsw_ioctl(struct ifnet
*, u_long
, void *);
142 static int admsw_init(struct ifnet
*);
143 static void admsw_stop(struct ifnet
*, int);
145 static void admsw_shutdown(void *);
147 static void admsw_reset(struct admsw_softc
*);
148 static void admsw_set_filter(struct admsw_softc
*);
150 static int admsw_intr(void *);
151 static void admsw_txintr(struct admsw_softc
*, int);
152 static void admsw_rxintr(struct admsw_softc
*, int);
153 static int admsw_add_rxbuf(struct admsw_softc
*, int, int);
154 #define admsw_add_rxhbuf(sc, idx) admsw_add_rxbuf(sc, idx, 1)
155 #define admsw_add_rxlbuf(sc, idx) admsw_add_rxbuf(sc, idx, 0)
157 static int admsw_mediachange(struct ifnet
*);
158 static void admsw_mediastatus(struct ifnet
*, struct ifmediareq
*);
160 static int admsw_match(struct device
*, struct cfdata
*, void *);
161 static void admsw_attach(struct device
*, struct device
*, void *);
163 CFATTACH_DECL(admsw
, sizeof(struct admsw_softc
),
164 admsw_match
, admsw_attach
, NULL
, NULL
);
167 admsw_match(struct device
*parent
, struct cfdata
*cf
, void *aux
)
169 struct obio_attach_args
*aa
= aux
;
171 return strcmp(aa
->oba_name
, cf
->cf_name
) == 0;
/* Switch register accessors; both expect a local `sc' in scope. */
#define REG_READ(o)	bus_space_read_4(sc->sc_st, sc->sc_ioh, (o))
#define REG_WRITE(o,v)	bus_space_write_4(sc->sc_st, sc->sc_ioh, (o),(v))
179 admsw_init_bufs(struct admsw_softc
*sc
)
182 struct admsw_desc
*desc
;
184 for (i
= 0; i
< ADMSW_NTXHDESC
; i
++) {
185 if (sc
->sc_txhsoft
[i
].ds_mbuf
!= NULL
) {
186 m_freem(sc
->sc_txhsoft
[i
].ds_mbuf
);
187 sc
->sc_txhsoft
[i
].ds_mbuf
= NULL
;
189 desc
= &sc
->sc_txhdescs
[i
];
192 desc
->len
= MAC_BUFLEN
;
194 ADMSW_CDTXHSYNC(sc
, i
,
195 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
197 sc
->sc_txhdescs
[ADMSW_NTXHDESC
- 1].data
|= ADM5120_DMA_RINGEND
;
198 ADMSW_CDTXHSYNC(sc
, ADMSW_NTXHDESC
- 1,
199 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
201 for (i
= 0; i
< ADMSW_NRXHDESC
; i
++) {
202 if (sc
->sc_rxhsoft
[i
].ds_mbuf
== NULL
) {
203 if (admsw_add_rxhbuf(sc
, i
) != 0)
204 panic("admsw_init_bufs\n");
206 ADMSW_INIT_RXHDESC(sc
, i
);
209 for (i
= 0; i
< ADMSW_NTXLDESC
; i
++) {
210 if (sc
->sc_txlsoft
[i
].ds_mbuf
!= NULL
) {
211 m_freem(sc
->sc_txlsoft
[i
].ds_mbuf
);
212 sc
->sc_txlsoft
[i
].ds_mbuf
= NULL
;
214 desc
= &sc
->sc_txldescs
[i
];
217 desc
->len
= MAC_BUFLEN
;
219 ADMSW_CDTXLSYNC(sc
, i
,
220 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
222 sc
->sc_txldescs
[ADMSW_NTXLDESC
- 1].data
|= ADM5120_DMA_RINGEND
;
223 ADMSW_CDTXLSYNC(sc
, ADMSW_NTXLDESC
- 1,
224 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
226 for (i
= 0; i
< ADMSW_NRXLDESC
; i
++) {
227 if (sc
->sc_rxlsoft
[i
].ds_mbuf
== NULL
) {
228 if (admsw_add_rxlbuf(sc
, i
) != 0)
229 panic("admsw_init_bufs\n");
231 ADMSW_INIT_RXLDESC(sc
, i
);
234 REG_WRITE(SEND_HBADDR_REG
, ADMSW_CDTXHADDR(sc
, 0));
235 REG_WRITE(SEND_LBADDR_REG
, ADMSW_CDTXLADDR(sc
, 0));
236 REG_WRITE(RECV_HBADDR_REG
, ADMSW_CDRXHADDR(sc
, 0));
237 REG_WRITE(RECV_LBADDR_REG
, ADMSW_CDRXLADDR(sc
, 0));
239 sc
->sc_txfree
= ADMSW_NTXLDESC
;
246 admsw_setvlan(struct admsw_softc
*sc
, char matrix
[6])
250 i
= matrix
[0] + (matrix
[1] << 8) + (matrix
[2] << 16) + (matrix
[3] << 24);
251 REG_WRITE(VLAN_G1_REG
, i
);
252 i
= matrix
[4] + (matrix
[5] << 8);
253 REG_WRITE(VLAN_G2_REG
, i
);
257 admsw_reset(struct admsw_softc
*sc
)
262 REG_WRITE(PORT_CONF0_REG
,
263 REG_READ(PORT_CONF0_REG
) | PORT_CONF0_DP_MASK
);
264 REG_WRITE(CPUP_CONF_REG
,
265 REG_READ(CPUP_CONF_REG
) | CPUP_CONF_DCPUP
);
267 /* Wait for DMA to complete. Overkill. In 3ms, we can
268 * send at least two entire 1500-byte packets at 10 Mb/s.
272 /* The datasheet recommends that we move all PHYs to reset
273 * state prior to software reset.
275 REG_WRITE(PHY_CNTL2_REG
,
276 REG_READ(PHY_CNTL2_REG
) & ~PHY_CNTL2_PHYR_MASK
);
278 /* Reset the switch. */
279 REG_WRITE(ADMSW_SW_RES
, 0x1);
283 REG_WRITE(ADMSW_BOOT_DONE
, ADMSW_BOOT_DONE_BO
);
286 REG_WRITE(CPUP_CONF_REG
,
287 CPUP_CONF_DCPUP
| CPUP_CONF_CRCP
| CPUP_CONF_DUNP_MASK
|
288 CPUP_CONF_DMCP_MASK
);
290 REG_WRITE(PORT_CONF0_REG
, PORT_CONF0_EMCP_MASK
| PORT_CONF0_EMBP_MASK
);
292 REG_WRITE(PHY_CNTL2_REG
,
293 REG_READ(PHY_CNTL2_REG
) | PHY_CNTL2_ANE_MASK
| PHY_CNTL2_PHYR_MASK
|
294 PHY_CNTL2_AMDIX_MASK
);
296 REG_WRITE(PHY_CNTL3_REG
, REG_READ(PHY_CNTL3_REG
) | PHY_CNTL3_RNT
);
298 REG_WRITE(ADMSW_INT_MASK
, INT_MASK
);
299 REG_WRITE(ADMSW_INT_ST
, INT_MASK
);
302 * While in DDB, we stop servicing interrupts, RX ring
303 * fills up and when free block counter falls behind FC
304 * threshold, the switch starts to emit 802.3x PAUSE
305 * frames. This can upset peer switches.
307 * Stop this from happening by disabling FC and D2
311 REG_READ(FC_TH_REG
) & ~(FC_TH_FCS_MASK
| FC_TH_D2S_MASK
));
313 admsw_setvlan(sc
, vlan_matrix
);
315 for (i
= 0; i
< SW_DEVS
; i
++) {
316 REG_WRITE(MAC_WT1_REG
,
318 (sc
->sc_enaddr
[3]<<8) |
319 (sc
->sc_enaddr
[4]<<16) |
320 ((sc
->sc_enaddr
[5]+i
)<<24));
321 REG_WRITE(MAC_WT0_REG
, (i
<<MAC_WT0_VLANID_SHIFT
) |
322 (sc
->sc_enaddr
[0]<<16) | (sc
->sc_enaddr
[1]<<24) |
323 MAC_WT0_WRITE
| MAC_WT0_VLANID_EN
);
325 while (!(REG_READ(MAC_WT0_REG
) & MAC_WT0_WRITE_DONE
));
327 wdog1
= REG_READ(ADM5120_WDOG1
);
328 REG_WRITE(ADM5120_WDOG1
, wdog1
& ~ADM5120_WDOG1_WDE
);
332 admsw_attach(struct device
*parent
, struct device
*self
, void *aux
)
334 uint8_t enaddr
[ETHER_ADDR_LEN
];
335 struct admsw_softc
*sc
= (void *) self
;
336 struct obio_attach_args
*aa
= aux
;
338 bus_dma_segment_t seg
;
342 printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS
);
344 sc
->sc_dmat
= aa
->oba_dt
;
345 sc
->sc_st
= aa
->oba_st
;
347 pd
= prop_dictionary_get(device_properties(&sc
->sc_dev
), "mac-addr");
357 memcpy(enaddr
, prop_data_data_nocopy(pd
), sizeof(enaddr
));
359 memcpy(sc
->sc_enaddr
, enaddr
, sizeof(sc
->sc_enaddr
));
361 printf("%s: base Ethernet address %s\n", sc
->sc_dev
.dv_xname
,
362 ether_sprintf(enaddr
));
364 /* Map the device. */
365 if (bus_space_map(sc
->sc_st
, aa
->oba_addr
, 512, 0, &sc
->sc_ioh
) != 0) {
366 printf("%s: unable to map device\n", device_xname(&sc
->sc_dev
));
370 /* Hook up the interrupt handler. */
371 sc
->sc_ih
= adm5120_intr_establish(aa
->oba_irq
, INTR_IRQ
, admsw_intr
, sc
);
373 if (sc
->sc_ih
== NULL
) {
374 printf("%s: unable to register interrupt handler\n",
375 sc
->sc_dev
.dv_xname
);
380 * Allocate the control data structures, and create and load the
383 if ((error
= bus_dmamem_alloc(sc
->sc_dmat
,
384 sizeof(struct admsw_control_data
), PAGE_SIZE
, 0, &seg
, 1, &rseg
,
386 printf("%s: unable to allocate control data, error = %d\n",
387 sc
->sc_dev
.dv_xname
, error
);
390 if ((error
= bus_dmamem_map(sc
->sc_dmat
, &seg
, rseg
,
391 sizeof(struct admsw_control_data
), (void *)&sc
->sc_control_data
,
393 printf("%s: unable to map control data, error = %d\n",
394 sc
->sc_dev
.dv_xname
, error
);
397 if ((error
= bus_dmamap_create(sc
->sc_dmat
,
398 sizeof(struct admsw_control_data
), 1,
399 sizeof(struct admsw_control_data
), 0, 0, &sc
->sc_cddmamap
)) != 0) {
400 printf("%s: unable to create control data DMA map, "
401 "error = %d\n", sc
->sc_dev
.dv_xname
, error
);
404 if ((error
= bus_dmamap_load(sc
->sc_dmat
, sc
->sc_cddmamap
,
405 sc
->sc_control_data
, sizeof(struct admsw_control_data
), NULL
,
407 printf("%s: unable to load control data DMA map, error = %d\n",
408 sc
->sc_dev
.dv_xname
, error
);
413 * Create the transmit buffer DMA maps.
415 for (i
= 0; i
< ADMSW_NTXHDESC
; i
++) {
416 if ((error
= bus_dmamap_create(sc
->sc_dmat
, MCLBYTES
,
418 &sc
->sc_txhsoft
[i
].ds_dmamap
)) != 0) {
419 printf("%s: unable to create txh DMA map %d, "
420 "error = %d\n", sc
->sc_dev
.dv_xname
, i
, error
);
423 sc
->sc_txhsoft
[i
].ds_mbuf
= NULL
;
425 for (i
= 0; i
< ADMSW_NTXLDESC
; i
++) {
426 if ((error
= bus_dmamap_create(sc
->sc_dmat
, MCLBYTES
,
428 &sc
->sc_txlsoft
[i
].ds_dmamap
)) != 0) {
429 printf("%s: unable to create txl DMA map %d, "
430 "error = %d\n", sc
->sc_dev
.dv_xname
, i
, error
);
433 sc
->sc_txlsoft
[i
].ds_mbuf
= NULL
;
437 * Create the receive buffer DMA maps.
439 for (i
= 0; i
< ADMSW_NRXHDESC
; i
++) {
440 if ((error
= bus_dmamap_create(sc
->sc_dmat
, MCLBYTES
, 1,
441 MCLBYTES
, 0, 0, &sc
->sc_rxhsoft
[i
].ds_dmamap
)) != 0) {
442 printf("%s: unable to create rxh DMA map %d, "
443 "error = %d\n", sc
->sc_dev
.dv_xname
, i
, error
);
446 sc
->sc_rxhsoft
[i
].ds_mbuf
= NULL
;
448 for (i
= 0; i
< ADMSW_NRXLDESC
; i
++) {
449 if ((error
= bus_dmamap_create(sc
->sc_dmat
, MCLBYTES
, 1,
450 MCLBYTES
, 0, 0, &sc
->sc_rxlsoft
[i
].ds_dmamap
)) != 0) {
451 printf("%s: unable to create rxl DMA map %d, "
452 "error = %d\n", sc
->sc_dev
.dv_xname
, i
, error
);
455 sc
->sc_rxlsoft
[i
].ds_mbuf
= NULL
;
462 for (i
= 0; i
< SW_DEVS
; i
++) {
463 ifmedia_init(&sc
->sc_ifmedia
[i
], 0, admsw_mediachange
, admsw_mediastatus
);
464 ifmedia_add(&sc
->sc_ifmedia
[i
], IFM_ETHER
|IFM_10_T
, 0, NULL
);
465 ifmedia_add(&sc
->sc_ifmedia
[i
], IFM_ETHER
|IFM_10_T
|IFM_FDX
, 0, NULL
);
466 ifmedia_add(&sc
->sc_ifmedia
[i
], IFM_ETHER
|IFM_100_TX
, 0, NULL
);
467 ifmedia_add(&sc
->sc_ifmedia
[i
], IFM_ETHER
|IFM_100_TX
|IFM_FDX
, 0, NULL
);
468 ifmedia_add(&sc
->sc_ifmedia
[i
], IFM_ETHER
|IFM_AUTO
, 0, NULL
);
469 ifmedia_set(&sc
->sc_ifmedia
[i
], IFM_ETHER
|IFM_AUTO
);
471 ifp
= &sc
->sc_ethercom
[i
].ec_if
;
472 strcpy(ifp
->if_xname
, sc
->sc_dev
.dv_xname
);
473 ifp
->if_xname
[5] += i
;
475 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
476 ifp
->if_ioctl
= admsw_ioctl
;
477 ifp
->if_start
= admsw_start
;
478 ifp
->if_watchdog
= admsw_watchdog
;
479 ifp
->if_init
= admsw_init
;
480 ifp
->if_stop
= admsw_stop
;
481 ifp
->if_capabilities
|= IFCAP_CSUM_IPv4_Tx
| IFCAP_CSUM_IPv4_Rx
;
482 IFQ_SET_MAXLEN(&ifp
->if_snd
, max(ADMSW_NTXLDESC
, IFQ_MAXLEN
));
483 IFQ_SET_READY(&ifp
->if_snd
);
485 /* Attach the interface. */
487 ether_ifattach(ifp
, enaddr
);
491 #ifdef ADMSW_EVENT_COUNTERS
492 evcnt_attach_dynamic(&sc
->sc_ev_txstall
, EVCNT_TYPE_MISC
,
493 NULL
, sc
->sc_dev
.dv_xname
, "txstall");
494 evcnt_attach_dynamic(&sc
->sc_ev_rxstall
, EVCNT_TYPE_MISC
,
495 NULL
, sc
->sc_dev
.dv_xname
, "rxstall");
496 evcnt_attach_dynamic(&sc
->sc_ev_txintr
, EVCNT_TYPE_MISC
,
497 NULL
, sc
->sc_dev
.dv_xname
, "txintr");
498 evcnt_attach_dynamic(&sc
->sc_ev_rxintr
, EVCNT_TYPE_MISC
,
499 NULL
, sc
->sc_dev
.dv_xname
, "rxintr");
501 evcnt_attach_dynamic(&sc
->sc_ev_rxsync
, EVCNT_TYPE_MISC
,
502 NULL
, sc
->sc_dev
.dv_xname
, "rxsync");
508 /* Make sure the interface is shutdown during reboot. */
509 sc
->sc_sdhook
= shutdownhook_establish(admsw_shutdown
, sc
);
510 if (sc
->sc_sdhook
== NULL
)
511 printf("%s: WARNING: unable to establish shutdown hook\n",
512 sc
->sc_dev
.dv_xname
);
514 /* leave interrupts and cpu port disabled */
522 * Make sure the interface is stopped at reboot time.
525 admsw_shutdown(void *arg
)
527 struct admsw_softc
*sc
= arg
;
530 for (i
= 0; i
< SW_DEVS
; i
++)
531 admsw_stop(&sc
->sc_ethercom
[i
].ec_if
, 1);
535 * admsw_start: [ifnet interface function]
537 * Start packet transmission on the interface.
540 admsw_start(struct ifnet
*ifp
)
542 struct admsw_softc
*sc
= ifp
->if_softc
;
544 struct admsw_descsoft
*ds
;
545 struct admsw_desc
*desc
;
547 struct ether_header
*eh
;
548 int error
, nexttx
, len
, i
;
552 * Loop through the send queues, setting up transmit descriptors
553 * unitl we drain the queues, or use up all available transmit
562 ifp
= &sc
->sc_ethercom
[i
].ec_if
;
563 if ((ifp
->if_flags
& (IFF_RUNNING
|IFF_OACTIVE
)) ==
565 /* Grab a packet off the queue. */
566 IFQ_POLL(&ifp
->if_snd
, m0
);
579 /* Get a spare descriptor. */
580 if (sc
->sc_txfree
== 0) {
581 /* No more slots left; notify upper layer. */
582 ifp
->if_flags
|= IFF_OACTIVE
;
583 ADMSW_EVCNT_INCR(&sc
->sc_ev_txstall
);
586 nexttx
= sc
->sc_txnext
;
587 desc
= &sc
->sc_txldescs
[nexttx
];
588 ds
= &sc
->sc_txlsoft
[nexttx
];
589 dmamap
= ds
->ds_dmamap
;
592 * Load the DMA map. If this fails, the packet either
593 * didn't fit in the alloted number of segments, or we
594 * were short on resources. In this case, we'll copy
597 if (m0
->m_pkthdr
.len
< ETHER_MIN_LEN
||
598 bus_dmamap_load_mbuf(sc
->sc_dmat
, dmamap
, m0
,
599 BUS_DMA_WRITE
|BUS_DMA_NOWAIT
) != 0) {
600 MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
602 printf("%s: unable to allocate Tx mbuf\n",
603 sc
->sc_dev
.dv_xname
);
606 if (m0
->m_pkthdr
.len
> MHLEN
) {
607 MCLGET(m
, M_DONTWAIT
);
608 if ((m
->m_flags
& M_EXT
) == 0) {
609 printf("%s: unable to allocate Tx "
610 "cluster\n", sc
->sc_dev
.dv_xname
);
615 m
->m_pkthdr
.csum_flags
= m0
->m_pkthdr
.csum_flags
;
616 m_copydata(m0
, 0, m0
->m_pkthdr
.len
, mtod(m
, void *));
617 m
->m_pkthdr
.len
= m
->m_len
= m0
->m_pkthdr
.len
;
618 if (m
->m_pkthdr
.len
< ETHER_MIN_LEN
) {
619 if (M_TRAILINGSPACE(m
) < ETHER_MIN_LEN
- m
->m_pkthdr
.len
)
620 panic("admsw_start: M_TRAILINGSPACE\n");
621 memset(mtod(m
, uint8_t *) + m
->m_pkthdr
.len
, 0,
622 ETHER_MIN_LEN
- ETHER_CRC_LEN
- m
->m_pkthdr
.len
);
623 m
->m_pkthdr
.len
= m
->m_len
= ETHER_MIN_LEN
;
625 error
= bus_dmamap_load_mbuf(sc
->sc_dmat
, dmamap
,
626 m
, BUS_DMA_WRITE
|BUS_DMA_NOWAIT
);
628 printf("%s: unable to load Tx buffer, "
629 "error = %d\n", sc
->sc_dev
.dv_xname
, error
);
634 IFQ_DEQUEUE(&ifp
->if_snd
, m0
);
641 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
644 /* Sync the DMA map. */
645 bus_dmamap_sync(sc
->sc_dmat
, dmamap
, 0, dmamap
->dm_mapsize
,
646 BUS_DMASYNC_PREWRITE
);
648 if (dmamap
->dm_nsegs
!= 1 && dmamap
->dm_nsegs
!= 2)
649 panic("admsw_start: dm_nsegs == %d\n", dmamap
->dm_nsegs
);
650 desc
->data
= dmamap
->dm_segs
[0].ds_addr
;
651 desc
->len
= len
= dmamap
->dm_segs
[0].ds_len
;
652 if (dmamap
->dm_nsegs
> 1) {
653 len
+= dmamap
->dm_segs
[1].ds_len
;
654 desc
->cntl
= dmamap
->dm_segs
[1].ds_addr
| ADM5120_DMA_BUF2ENABLE
;
657 desc
->status
= (len
<< ADM5120_DMA_LENSHIFT
) | (1 << vlan
);
658 eh
= mtod(m0
, struct ether_header
*);
659 if (ntohs(eh
->ether_type
) == ETHERTYPE_IP
&&
660 m0
->m_pkthdr
.csum_flags
& M_CSUM_IPv4
)
661 desc
->status
|= ADM5120_DMA_CSUM
;
662 if (nexttx
== ADMSW_NTXLDESC
- 1)
663 desc
->data
|= ADM5120_DMA_RINGEND
;
664 desc
->data
|= ADM5120_DMA_OWN
;
666 /* Sync the descriptor. */
667 ADMSW_CDTXLSYNC(sc
, nexttx
,
668 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
670 REG_WRITE(SEND_TRIG_REG
, 1);
671 /* printf("send slot %d\n",nexttx); */
674 * Store a pointer to the packet so we can free it later.
678 /* Advance the Tx pointer. */
680 sc
->sc_txnext
= ADMSW_NEXTTXL(nexttx
);
683 /* Pass the packet to any BPF listeners. */
685 bpf_mtap(ifp
->if_bpf
, m0
);
686 #endif /* NBPFILTER */
688 /* Set a watchdog timer in case the chip flakes out. */
689 sc
->sc_ethercom
[0].ec_if
.if_timer
= 5;
694 * admsw_watchdog: [ifnet interface function]
696 * Watchdog timer handler.
699 admsw_watchdog(struct ifnet
*ifp
)
701 struct admsw_softc
*sc
= ifp
->if_softc
;
705 /* Check if an interrupt was lost. */
706 if (sc
->sc_txfree
== ADMSW_NTXLDESC
) {
707 printf("%s: watchdog false alarm\n", sc
->sc_dev
.dv_xname
);
710 if (sc
->sc_ethercom
[0].ec_if
.if_timer
!= 0)
711 printf("%s: watchdog timer is %d!\n", sc
->sc_dev
.dv_xname
, sc
->sc_ethercom
[0].ec_if
.if_timer
);
713 if (sc
->sc_txfree
== ADMSW_NTXLDESC
) {
714 printf("%s: tx IRQ lost (queue empty)\n", sc
->sc_dev
.dv_xname
);
717 if (sc
->sc_ethercom
[0].ec_if
.if_timer
!= 0) {
718 printf("%s: tx IRQ lost (timer recharged)\n", sc
->sc_dev
.dv_xname
);
723 printf("%s: device timeout, txfree = %d\n", sc
->sc_dev
.dv_xname
, sc
->sc_txfree
);
724 for (vlan
= 0; vlan
< SW_DEVS
; vlan
++)
725 admsw_stop(&sc
->sc_ethercom
[vlan
].ec_if
, 0);
726 for (vlan
= 0; vlan
< SW_DEVS
; vlan
++)
727 (void) admsw_init(&sc
->sc_ethercom
[vlan
].ec_if
);
729 /* Try to get more packets going. */
734 * admsw_ioctl: [ifnet interface function]
736 * Handle control requests from the operator.
739 admsw_ioctl(struct ifnet
*ifp
, u_long cmd
, void *data
)
741 struct admsw_softc
*sc
= ifp
->if_softc
;
749 if ((error
= ether_ioctl(ifp
, cmd
, data
)) == ENETRESET
)
754 port
= (struct ethercom
*)ifp
- sc
->sc_ethercom
; /* XXX */
758 error
= ifmedia_ioctl(ifp
, (struct ifreq
*)data
,
759 &sc
->sc_ifmedia
[port
], cmd
);
764 ifd
= (struct ifdrv
*) data
;
765 if (ifd
->ifd_cmd
!= 0 || ifd
->ifd_len
!= sizeof(vlan_matrix
)) {
769 if (cmd
== SIOCGDRVSPEC
) {
770 error
= copyout(vlan_matrix
, ifd
->ifd_data
,
771 sizeof(vlan_matrix
));
773 error
= copyin(ifd
->ifd_data
, vlan_matrix
,
774 sizeof(vlan_matrix
));
775 admsw_setvlan(sc
, vlan_matrix
);
780 error
= ether_ioctl(ifp
, cmd
, data
);
781 if (error
== ENETRESET
) {
783 * Multicast list has changed; set the hardware filter
786 admsw_set_filter(sc
);
792 /* Try to get more packets going. */
803 * Interrupt service routine.
806 admsw_intr(void *arg
)
808 struct admsw_softc
*sc
= arg
;
812 pending
= REG_READ(ADMSW_INT_ST
);
814 if ((pending
& ~(ADMSW_INTR_RHD
|ADMSW_INTR_RLD
|ADMSW_INTR_SHD
|ADMSW_INTR_SLD
|ADMSW_INTR_W1TE
|ADMSW_INTR_W0TE
)) != 0) {
815 snprintb(buf
, sizeof(buf
), ADMSW_INT_FMT
, pending
);
816 printf("%s: pending=%s\n", __func__
, buf
);
818 REG_WRITE(ADMSW_INT_ST
, pending
);
823 if ((pending
& ADMSW_INTR_RHD
) != 0)
826 if ((pending
& ADMSW_INTR_RLD
) != 0)
829 if ((pending
& ADMSW_INTR_SHD
) != 0)
832 if ((pending
& ADMSW_INTR_SLD
) != 0)
841 * Helper; handle transmit interrupts.
844 admsw_txintr(struct admsw_softc
*sc
, int prio
)
847 struct admsw_desc
*desc
;
848 struct admsw_descsoft
*ds
;
852 /* printf("txintr: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
853 for (i
= sc
->sc_txdirty
; sc
->sc_txfree
!= ADMSW_NTXLDESC
;
854 i
= ADMSW_NEXTTXL(i
)) {
856 ADMSW_CDTXLSYNC(sc
, i
,
857 BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
859 desc
= &sc
->sc_txldescs
[i
];
860 ds
= &sc
->sc_txlsoft
[i
];
861 if (desc
->data
& ADM5120_DMA_OWN
) {
862 ADMSW_CDTXLSYNC(sc
, i
,
863 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
867 bus_dmamap_sync(sc
->sc_dmat
, ds
->ds_dmamap
,
868 0, ds
->ds_dmamap
->dm_mapsize
, BUS_DMASYNC_POSTWRITE
);
869 bus_dmamap_unload(sc
->sc_dmat
, ds
->ds_dmamap
);
870 m_freem(ds
->ds_mbuf
);
873 vlan
= ffs(desc
->status
& 0x3f) - 1;
874 if (vlan
< 0 || vlan
>= SW_DEVS
)
875 panic("admsw_txintr: bad vlan\n");
876 ifp
= &sc
->sc_ethercom
[vlan
].ec_if
;
878 /* printf("clear tx slot %d\n",i); */
887 #ifdef ADMSW_EVENT_COUNTERS
888 ADMSW_EVCNT_INCR(&sc
->sc_ev_txintr
);
890 for (vlan
= 0; vlan
< SW_DEVS
; vlan
++)
891 sc
->sc_ethercom
[vlan
].ec_if
.if_flags
&= ~IFF_OACTIVE
;
893 ifp
= &sc
->sc_ethercom
[0].ec_if
;
895 /* Try to queue more packets. */
899 * If there are no more pending transmissions,
900 * cancel the watchdog timer.
902 if (sc
->sc_txfree
== ADMSW_NTXLDESC
)
907 /* printf("txintr end: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
913 * Helper; handle receive interrupts.
916 admsw_rxintr(struct admsw_softc
*sc
, int high
)
919 struct admsw_descsoft
*ds
;
922 int i
, len
, port
, vlan
;
924 /* printf("rxintr\n"); */
926 panic("admsw_rxintr: high priority packet\n");
928 #ifdef ADMSW_EVENT_COUNTERS
933 ADMSW_CDRXLSYNC(sc
, sc
->sc_rxptr
, BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
934 if ((sc
->sc_rxldescs
[sc
->sc_rxptr
].data
& ADM5120_DMA_OWN
) == 0)
935 ADMSW_CDRXLSYNC(sc
, sc
->sc_rxptr
, BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
939 ADMSW_CDRXLSYNC(sc
, i
, BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
940 i
= ADMSW_NEXTRXL(i
);
941 /* the ring is empty, just return. */
942 if (i
== sc
->sc_rxptr
)
944 ADMSW_CDRXLSYNC(sc
, i
, BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
945 } while (sc
->sc_rxldescs
[i
].data
& ADM5120_DMA_OWN
);
946 ADMSW_CDRXLSYNC(sc
, i
, BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
948 ADMSW_CDRXLSYNC(sc
, sc
->sc_rxptr
, BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
949 if ((sc
->sc_rxldescs
[sc
->sc_rxptr
].data
& ADM5120_DMA_OWN
) == 0)
950 ADMSW_CDRXLSYNC(sc
, sc
->sc_rxptr
, BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
952 ADMSW_CDRXLSYNC(sc
, sc
->sc_rxptr
, BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
953 /* We've fallen behind the chip: catch it. */
954 printf("%s: RX ring resync, base=%x, work=%x, %d -> %d\n",
955 sc
->sc_dev
.dv_xname
, REG_READ(RECV_LBADDR_REG
),
956 REG_READ(RECV_LWADDR_REG
), sc
->sc_rxptr
, i
);
958 ADMSW_EVCNT_INCR(&sc
->sc_ev_rxsync
);
962 for (i
= sc
->sc_rxptr
;; i
= ADMSW_NEXTRXL(i
)) {
963 ds
= &sc
->sc_rxlsoft
[i
];
965 ADMSW_CDRXLSYNC(sc
, i
, BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
967 if (sc
->sc_rxldescs
[i
].data
& ADM5120_DMA_OWN
) {
968 ADMSW_CDRXLSYNC(sc
, i
, BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
972 /* printf("process slot %d\n",i); */
974 #ifdef ADMSW_EVENT_COUNTERS
978 bus_dmamap_sync(sc
->sc_dmat
, ds
->ds_dmamap
, 0,
979 ds
->ds_dmamap
->dm_mapsize
, BUS_DMASYNC_POSTREAD
);
981 stat
= sc
->sc_rxldescs
[i
].status
;
982 len
= (stat
& ADM5120_DMA_LEN
) >> ADM5120_DMA_LENSHIFT
;
983 len
-= ETHER_CRC_LEN
;
984 port
= (stat
& ADM5120_DMA_PORTID
) >> ADM5120_DMA_PORTSHIFT
;
985 for (vlan
= 0; vlan
< SW_DEVS
; vlan
++)
986 if ((1 << port
) & vlan_matrix
[vlan
])
990 ifp
= &sc
->sc_ethercom
[vlan
].ec_if
;
993 if (admsw_add_rxlbuf(sc
, i
) != 0) {
995 ADMSW_INIT_RXLDESC(sc
, i
);
996 bus_dmamap_sync(sc
->sc_dmat
, ds
->ds_dmamap
, 0,
997 ds
->ds_dmamap
->dm_mapsize
, BUS_DMASYNC_PREREAD
);
1001 m
->m_pkthdr
.rcvif
= ifp
;
1002 m
->m_pkthdr
.len
= m
->m_len
= len
;
1003 if ((stat
& ADM5120_DMA_TYPE
) == ADM5120_DMA_TYPE_IP
) {
1004 m
->m_pkthdr
.csum_flags
|= M_CSUM_IPv4
;
1005 if (stat
& ADM5120_DMA_CSUMFAIL
)
1006 m
->m_pkthdr
.csum_flags
|= M_CSUM_IPv4_BAD
;
1009 /* Pass this up to any BPF listeners. */
1011 bpf_mtap(ifp
->if_bpf
, m
);
1012 #endif /* NBPFILTER > 0 */
1015 (*ifp
->if_input
)(ifp
, m
);
1018 #ifdef ADMSW_EVENT_COUNTERS
1020 ADMSW_EVCNT_INCR(&sc
->sc_ev_rxintr
);
1022 if (pkts
== ADMSW_NRXLDESC
)
1023 ADMSW_EVCNT_INCR(&sc
->sc_ev_rxstall
);
1026 /* Update the receive pointer. */
1031 * admsw_init: [ifnet interface function]
1033 * Initialize the interface. Must be called at splnet().
1036 admsw_init(struct ifnet
*ifp
)
1038 struct admsw_softc
*sc
= ifp
->if_softc
;
1040 /* printf("admsw_init called\n"); */
1042 if ((ifp
->if_flags
& IFF_RUNNING
) == 0) {
1043 if (sc
->ndevs
== 0) {
1044 admsw_init_bufs(sc
);
1046 REG_WRITE(CPUP_CONF_REG
,
1047 CPUP_CONF_CRCP
| CPUP_CONF_DUNP_MASK
|
1048 CPUP_CONF_DMCP_MASK
);
1049 /* clear all pending interrupts */
1050 REG_WRITE(ADMSW_INT_ST
, INT_MASK
);
1052 /* enable needed interrupts */
1053 REG_WRITE(ADMSW_INT_MASK
, REG_READ(ADMSW_INT_MASK
) &
1054 ~(ADMSW_INTR_SHD
| ADMSW_INTR_SLD
| ADMSW_INTR_RHD
|
1055 ADMSW_INTR_RLD
| ADMSW_INTR_HDF
| ADMSW_INTR_LDF
));
1060 /* Set the receive filter. */
1061 admsw_set_filter(sc
);
1063 /* mark iface as running */
1064 ifp
->if_flags
|= IFF_RUNNING
;
1065 ifp
->if_flags
&= ~IFF_OACTIVE
;
1071 * admsw_stop: [ifnet interface function]
1073 * Stop transmission on the interface.
1076 admsw_stop(struct ifnet
*ifp
, int disable
)
1078 struct admsw_softc
*sc
= ifp
->if_softc
;
1080 /* printf("admsw_stop: %d\n",disable); */
1082 if (!(ifp
->if_flags
& IFF_RUNNING
))
1085 if (--sc
->ndevs
== 0) {
1086 /* printf("debug: de-initializing hardware\n"); */
1088 /* disable cpu port */
1089 REG_WRITE(CPUP_CONF_REG
,
1090 CPUP_CONF_DCPUP
| CPUP_CONF_CRCP
|
1091 CPUP_CONF_DUNP_MASK
| CPUP_CONF_DMCP_MASK
);
1093 /* XXX We should disable, then clear? --dyoung */
1094 /* clear all pending interrupts */
1095 REG_WRITE(ADMSW_INT_ST
, INT_MASK
);
1097 /* disable interrupts */
1098 REG_WRITE(ADMSW_INT_MASK
, INT_MASK
);
1101 /* Mark the interface as down and cancel the watchdog timer. */
1102 ifp
->if_flags
&= ~(IFF_RUNNING
| IFF_OACTIVE
);
1111 * Set up the receive filter.
1114 admsw_set_filter(struct admsw_softc
*sc
)
1117 uint32_t allmc
, anymc
, conf
, promisc
;
1118 struct ether_multi
*enm
;
1119 struct ethercom
*ec
;
1121 struct ether_multistep step
;
1123 /* Find which ports should be operated in promisc mode. */
1124 allmc
= anymc
= promisc
= 0;
1125 for (i
= 0; i
< SW_DEVS
; i
++) {
1126 ec
= &sc
->sc_ethercom
[i
];
1128 if (ifp
->if_flags
& IFF_PROMISC
)
1129 promisc
|= vlan_matrix
[i
];
1131 ifp
->if_flags
&= ~IFF_ALLMULTI
;
1133 ETHER_FIRST_MULTI(step
, ec
, enm
);
1134 while (enm
!= NULL
) {
1135 if (memcmp(enm
->enm_addrlo
, enm
->enm_addrhi
,
1136 ETHER_ADDR_LEN
) != 0) {
1137 printf("%s: punting on mcast range\n",
1139 ifp
->if_flags
|= IFF_ALLMULTI
;
1140 allmc
|= vlan_matrix
[i
];
1144 anymc
|= vlan_matrix
[i
];
1147 /* XXX extract subroutine --dyoung */
1148 REG_WRITE(MAC_WT1_REG
,
1149 enm
->enm_addrlo
[2] |
1150 (enm
->enm_addrlo
[3] << 8) |
1151 (enm
->enm_addrlo
[4] << 16) |
1152 (enm
->enm_addrlo
[5] << 24));
1153 REG_WRITE(MAC_WT0_REG
,
1154 (i
<< MAC_WT0_VLANID_SHIFT
) |
1155 (enm
->enm_addrlo
[0] << 16) |
1156 (enm
->enm_addrlo
[1] << 24) |
1157 MAC_WT0_WRITE
| MAC_WT0_VLANID_EN
);
1159 while (!(REG_READ(MAC_WT0_REG
) & MAC_WT0_WRITE_DONE
));
1162 /* load h/w with mcast address, port = CPU */
1163 ETHER_NEXT_MULTI(step
, enm
);
1167 conf
= REG_READ(CPUP_CONF_REG
);
1168 /* 1 Disable forwarding of unknown & multicast packets to
1170 * 2 Enable forwarding of unknown & multicast packets to
1171 * CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set.
1173 conf
|= CPUP_CONF_DUNP_MASK
| CPUP_CONF_DMCP_MASK
;
1174 /* Enable forwarding of unknown packets to CPU on selected ports. */
1175 conf
^= ((promisc
<< CPUP_CONF_DUNP_SHIFT
) & CPUP_CONF_DUNP_MASK
);
1176 conf
^= ((allmc
<< CPUP_CONF_DMCP_SHIFT
) & CPUP_CONF_DMCP_MASK
);
1177 conf
^= ((anymc
<< CPUP_CONF_DMCP_SHIFT
) & CPUP_CONF_DMCP_MASK
);
1178 REG_WRITE(CPUP_CONF_REG
, conf
);
1184 * Add a receive buffer to the indicated descriptor.
1187 admsw_add_rxbuf(struct admsw_softc
*sc
, int idx
, int high
)
1189 struct admsw_descsoft
*ds
;
1194 ds
= &sc
->sc_rxhsoft
[idx
];
1196 ds
= &sc
->sc_rxlsoft
[idx
];
1198 MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
1202 MCLGET(m
, M_DONTWAIT
);
1203 if ((m
->m_flags
& M_EXT
) == 0) {
1208 if (ds
->ds_mbuf
!= NULL
)
1209 bus_dmamap_unload(sc
->sc_dmat
, ds
->ds_dmamap
);
1213 error
= bus_dmamap_load(sc
->sc_dmat
, ds
->ds_dmamap
,
1214 m
->m_ext
.ext_buf
, m
->m_ext
.ext_size
, NULL
,
1215 BUS_DMA_READ
| BUS_DMA_NOWAIT
);
1217 printf("%s: can't load rx DMA map %d, error = %d\n",
1218 sc
->sc_dev
.dv_xname
, idx
, error
);
1219 panic("admsw_add_rxbuf"); /* XXX */
1222 bus_dmamap_sync(sc
->sc_dmat
, ds
->ds_dmamap
, 0,
1223 ds
->ds_dmamap
->dm_mapsize
, BUS_DMASYNC_PREREAD
);
1226 ADMSW_INIT_RXHDESC(sc
, idx
);
1228 ADMSW_INIT_RXLDESC(sc
, idx
);
1234 admsw_mediachange(struct ifnet
*ifp
)
1236 struct admsw_softc
*sc
= ifp
->if_softc
;
1237 int port
= (struct ethercom
*)ifp
- sc
->sc_ethercom
; /* XXX */
1238 struct ifmedia
*ifm
= &sc
->sc_ifmedia
[port
];
1241 if (IFM_TYPE(ifm
->ifm_media
) != IFM_ETHER
)
1244 if (IFM_SUBTYPE(ifm
->ifm_media
) == IFM_AUTO
) {
1245 val
= PHY_CNTL2_AUTONEG
|PHY_CNTL2_100M
|PHY_CNTL2_FDX
;
1246 } else if (IFM_SUBTYPE(ifm
->ifm_media
) == IFM_100_TX
) {
1247 if ((ifm
->ifm_media
& IFM_GMASK
) == IFM_FDX
)
1248 val
= PHY_CNTL2_100M
|PHY_CNTL2_FDX
;
1250 val
= PHY_CNTL2_100M
;
1251 } else if (IFM_SUBTYPE(ifm
->ifm_media
) == IFM_10_T
) {
1252 if ((ifm
->ifm_media
& IFM_GMASK
) == IFM_FDX
)
1253 val
= PHY_CNTL2_FDX
;
1259 old
= REG_READ(PHY_CNTL2_REG
);
1260 new = old
& ~((PHY_CNTL2_AUTONEG
|PHY_CNTL2_100M
|PHY_CNTL2_FDX
) << port
);
1261 new |= (val
<< port
);
1264 REG_WRITE(PHY_CNTL2_REG
, new);
1270 admsw_mediastatus(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
1272 struct admsw_softc
*sc
= ifp
->if_softc
;
1273 int port
= (struct ethercom
*)ifp
- sc
->sc_ethercom
; /* XXX */
1276 ifmr
->ifm_status
= IFM_AVALID
;
1277 ifmr
->ifm_active
= IFM_ETHER
;
1279 status
= REG_READ(PHY_ST_REG
) >> port
;
1281 if ((status
& PHY_ST_LINKUP
) == 0) {
1282 ifmr
->ifm_active
|= IFM_NONE
;
1286 ifmr
->ifm_status
|= IFM_ACTIVE
;
1287 ifmr
->ifm_active
|= (status
& PHY_ST_100M
) ? IFM_100_TX
: IFM_10_T
;
1288 if (status
& PHY_ST_FDX
)
1289 ifmr
->ifm_active
|= IFM_FDX
;