/*	$Id: at91emac.c,v 1.6 2009/10/23 06:53:13 snj Exp $	*/
/*	$NetBSD: at91emac.c,v 1.5 2009/03/18 16:00:09 cegger Exp $	*/

/*
 * Copyright (c) 2007 Embedtronics Oy
 * All rights reserved.
 *
 * Based on arch/arm/ep93xx/epe.c
 *
 * Copyright (c) 2004 Jesse Off
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: at91emac.c,v 1.5 2009/03/18 16:00:09 cegger Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#ifdef IPKDB_AT91	// @@@
#include <ipkdb/ipkdb.h>
#endif

#include <arm/at91/at91var.h>
#include <arm/at91/at91emacreg.h>
#include <arm/at91/at91emacvar.h>
#define DEFAULT_MDCDIV	32

#ifndef EMAC_FAST
#define EMAC_FAST
#endif

#ifndef EMAC_FAST
#define EMAC_READ(x) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x))
#define EMAC_WRITE(x, y) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x), (y))
#else
#define EMAC_READ(x) ETHREG(x)
#define EMAC_WRITE(x, y) ETHREG(x) = (y)
#endif /* ! EMAC_FAST */
static int	emac_match(device_t, cfdata_t, void *);
static void	emac_attach(device_t, device_t, void *);
static void	emac_init(struct emac_softc *);
static int	emac_intr(void *arg);
static int	emac_gctx(struct emac_softc *);
static int	emac_mediachange(struct ifnet *);
static void	emac_mediastatus(struct ifnet *, struct ifmediareq *);
int		emac_mii_readreg(device_t, int, int);
void		emac_mii_writereg(device_t, int, int, int);
void		emac_statchg(device_t);
void		emac_tick(void *);
static int	emac_ifioctl(struct ifnet *, u_long, void *);
static void	emac_ifstart(struct ifnet *);
static void	emac_ifwatchdog(struct ifnet *);
static int	emac_ifinit(struct ifnet *);
static void	emac_ifstop(struct ifnet *, int);
static void	emac_setaddr(struct ifnet *);

CFATTACH_DECL(at91emac, sizeof(struct emac_softc),
    emac_match, emac_attach, NULL, NULL);

#ifdef EMAC_DEBUG
int emac_debug = EMAC_DEBUG;
#define DPRINTFN(n, fmt)	if (emac_debug >= (n)) printf fmt
#else
#define DPRINTFN(n, fmt)
#endif
static int
emac_match(device_t parent, cfdata_t match, void *aux)
{
	if (strcmp(match->cf_name, "at91emac") == 0)
		return 2;
	return 0;
}
static void
emac_attach(device_t parent, device_t self, void *aux)
{
	struct emac_softc *sc = device_private(self);
	struct at91bus_attach_args *sa = aux;
	prop_data_t enaddr;
	uint32_t u;

	printf("\n");
	sc->sc_dev = self;
	sc->sc_iot = sa->sa_iot;
	sc->sc_pid = sa->sa_pid;
	sc->sc_dmat = sa->sa_dmat;

	if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size, 0, &sc->sc_ioh))
		panic("%s: Cannot map registers", device_xname(self));

	/* enable peripheral clock */
	at91_peripheral_clock(sc->sc_pid, 1);

	/* configure emac: */
	EMAC_WRITE(ETH_CTL, 0);			// disable everything
	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
	EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_TCR, 0);			// send nothing
	//(void)EMAC_READ(ETH_ISR);
	u = EMAC_READ(ETH_TSR);
	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
				  | ETH_TSR_IDLE | ETH_TSR_RLE
				  | ETH_TSR_COL | ETH_TSR_OVR)));
	u = EMAC_READ(ETH_RSR);
	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));

	/* Fetch the Ethernet address from property if set. */
	enaddr = prop_dictionary_get(device_properties(self), "mac-addr");

	if (enaddr != NULL) {
		KASSERT(prop_object_type(enaddr) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(enaddr) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(enaddr),
		       ETHER_ADDR_LEN);
	} else {
		static const uint8_t hardcoded[ETHER_ADDR_LEN] = {
			0x00, 0x0d, 0x10, 0x81, 0x0c, 0x94
		};
		memcpy(sc->sc_enaddr, hardcoded, ETHER_ADDR_LEN);
	}

	at91_intr_establish(sc->sc_pid, IPL_NET, INTR_HIGH_LEVEL, emac_intr, sc);
	emac_init(sc);
}
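
/*
 * emac_gctx: garbage-collect the transmit queue.  Frees the mbufs of
 * frames the hardware has finished sending (as reported by ETH_TSR) and
 * clears IFF_OACTIVE.  Returns non-zero when the transmit buffer register
 * can accept another frame, 0 when the hardware queue is still full.
 */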
static int
emac_gctx(struct emac_softc *sc)
{
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	u_int32_t tsr;

	tsr = EMAC_READ(ETH_TSR);
	if (!(tsr & ETH_TSR_BNQ)) {
		// no space left
		return 0;
	}

	// free sent frames
	while (sc->txqc > (tsr & ETH_TSR_IDLE ? 0 : 1)) {
		int i = sc->txqi % TX_QLEN;
		bus_dmamap_sync(sc->sc_dmat, sc->txq[i].m_dmamap, 0,
				sc->txq[i].m->m_pkthdr.len, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->txq[i].m_dmamap);
		m_freem(sc->txq[i].m);
		DPRINTFN(2, ("%s: freed idx #%i mbuf %p (txqc=%i)\n",
		    __FUNCTION__, i, sc->txq[i].m, sc->txqc));
		sc->txq[i].m = NULL;
		sc->txqi = (i + 1) % TX_QLEN;
		sc->txqc--;
	}

	// mark we're free
	if (ifp->if_flags & IFF_OACTIVE) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Disable transmit-buffer-free interrupt */
		/*EMAC_WRITE(ETH_IDR, ETH_ISR_TBRE);*/
	}

	return 1;
}
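
/*
 * Interrupt handler.  Clears "receive buffer not available" and receive
 * overrun conditions, drains completed receive descriptors (handing the
 * filled clusters to the stack and reloading the ring with fresh mbufs),
 * then reclaims finished transmit buffers and restarts output if frames
 * are still queued.
 */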
static int
emac_intr(void *arg)
{
	struct emac_softc *sc = (struct emac_softc *)arg;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	u_int32_t imr, isr, rsr, ctl;
	int bi;

	imr = ~EMAC_READ(ETH_IMR);
	if (!(imr & (ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
		     | ETH_ISR_RBNA | ETH_ISR_ROVR))) {
		// interrupt not enabled, can't be us
		return 0;
	}

	isr = EMAC_READ(ETH_ISR) & imr;
	rsr = EMAC_READ(ETH_RSR);		// get receive status register

	DPRINTFN(2, ("%s: isr=0x%08X rsr=0x%08X imr=0x%08X\n",
	    __FUNCTION__, isr, rsr, imr));

	if (isr & ETH_ISR_RBNA) {		// out of receive buffers
		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear interrupt
		ctl = EMAC_READ(ETH_CTL);		// get current control register value
		EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);	// disable receiver
		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear BNA bit
		EMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);	// re-enable receiver
		ifp->if_ierrors++;
		ifp->if_ipackets++;
		DPRINTFN(1, ("%s: out of receive buffers\n", __FUNCTION__));
	}
	if (isr & ETH_ISR_ROVR) {
		EMAC_WRITE(ETH_RSR, ETH_RSR_OVR);	// clear interrupt
		ifp->if_ierrors++;
		ifp->if_ipackets++;
		DPRINTFN(1, ("%s: receive overrun\n", __FUNCTION__));
	}

	if (isr & ETH_ISR_RCOM) {		// packet has been received!
		uint32_t nfo;
		// @@@ if memory is NOT coherent, then we're in trouble @@@@
		// bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
		// printf("## RDSC[%i].ADDR=0x%08X\n", sc->rxqi % RX_QLEN, sc->RDSC[sc->rxqi % RX_QLEN].Addr);
		DPRINTFN(2, ("#2 RDSC[%i].INFO=0x%08X\n",
		    sc->rxqi % RX_QLEN, sc->RDSC[sc->rxqi % RX_QLEN].Info));
		while (sc->RDSC[(bi = sc->rxqi % RX_QLEN)].Addr & ETH_RDSC_F_USED) {
			int fl;
			struct mbuf *m;

			nfo = sc->RDSC[bi].Info;
			fl = (nfo & ETH_RDSC_I_LEN) - 4;
			DPRINTFN(2, ("## nfo=0x%08X\n", nfo));

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL) MCLGET(m, M_DONTWAIT);
			if (m != NULL && (m->m_flags & M_EXT)) {
				bus_dmamap_sync(sc->sc_dmat, sc->rxq[bi].m_dmamap, 0,
						MCLBYTES, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat,
					sc->rxq[bi].m_dmamap);
				sc->rxq[bi].m->m_pkthdr.rcvif = ifp;
				sc->rxq[bi].m->m_pkthdr.len =
					sc->rxq[bi].m->m_len = fl;
#if NBPFILTER > 0
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, sc->rxq[bi].m);
#endif /* NBPFILTER > 0 */
				DPRINTFN(2, ("received %u bytes packet\n", fl));
				(*ifp->if_input)(ifp, sc->rxq[bi].m);
				if (mtod(m, intptr_t) & 3) {
					m_adj(m, mtod(m, intptr_t) & 3);
				}
				sc->rxq[bi].m = m;
				bus_dmamap_load(sc->sc_dmat,
					sc->rxq[bi].m_dmamap,
					m->m_ext.ext_buf, MCLBYTES,
					NULL, BUS_DMA_NOWAIT);
				bus_dmamap_sync(sc->sc_dmat, sc->rxq[bi].m_dmamap, 0,
						MCLBYTES, BUS_DMASYNC_PREREAD);
				sc->RDSC[bi].Info = 0;
				sc->RDSC[bi].Addr =
					sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr
					| (bi == (RX_QLEN - 1) ? ETH_RDSC_F_WRAP : 0);
			} else {
				/* Drop packets until we can get replacement
				 * empty mbufs for the RXDQ.
				 */
				if (m != NULL) {
					m_freem(m);
				}
				ifp->if_ierrors++;
			}
			sc->rxqi++;
		}
		// bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	if (emac_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
		emac_ifstart(ifp);
	}
#if 0 // reloop
	irq = EMAC_READ(IntStsC);
	if ((irq & (IntSts_RxSQ | IntSts_ECI)) != 0)
		goto begin;
#endif

	return (1);
}
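
/*
 * One-time initialization called from attach: reset the MAC, program the
 * station address, allocate and load the DMA-coherent receive descriptor
 * ring, pre-load it with mbuf clusters, create the transmit DMA maps,
 * attach the MII/ifmedia layer and finally register the network interface.
 */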
static void
emac_init(struct emac_softc *sc)
{
	bus_dma_segment_t segs;
	void *addr;
	int rsegs, err, i;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	uint32_t u;
#if 0
	int mdcdiv = DEFAULT_MDCDIV;
#endif

	callout_init(&sc->emac_tick_ch, 0);

	// ok...
	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
	EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_TCR, 0);			// send nothing
	// (void)EMAC_READ(ETH_ISR);
	u = EMAC_READ(ETH_TSR);
	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
				  | ETH_TSR_IDLE | ETH_TSR_RLE
				  | ETH_TSR_COL | ETH_TSR_OVR)));
	u = EMAC_READ(ETH_RSR);
	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));

	/* configure EMAC */
	EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);
#if 0
	if (device_cfdata(&sc->sc_dev)->cf_flags)
		mdcdiv = device_cfdata(&sc->sc_dev)->cf_flags;
#endif
	/* set ethernet address */
	EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
		   | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
		   | (sc->sc_enaddr[0]));
	EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
		   | (sc->sc_enaddr[4]));
	EMAC_WRITE(ETH_SA2L, 0);
	EMAC_WRITE(ETH_SA2H, 0);
	EMAC_WRITE(ETH_SA3L, 0);
	EMAC_WRITE(ETH_SA3H, 0);
	EMAC_WRITE(ETH_SA4L, 0);
	EMAC_WRITE(ETH_SA4H, 0);

	/* Allocate a page of memory for receive queue descriptors */
	sc->rbqlen = (ETH_RDSC_SIZE * (RX_QLEN + 1) * 2 + PAGE_SIZE - 1) / PAGE_SIZE;
	sc->rbqlen *= PAGE_SIZE;
	DPRINTFN(1, ("%s: rbqlen=%i\n", __FUNCTION__, sc->rbqlen));

	err = bus_dmamem_alloc(sc->sc_dmat, sc->rbqlen, 0,
		MAX(16384, PAGE_SIZE),	// see EMAC errata why forced to 16384 byte boundary
		&segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		DPRINTFN(1, ("%s: -> bus_dmamem_map\n", __FUNCTION__));
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, sc->rbqlen,
			&sc->rbqpage, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
	}
	if (err == 0) {
		DPRINTFN(1, ("%s: -> bus_dmamap_create\n", __FUNCTION__));
		err = bus_dmamap_create(sc->sc_dmat, sc->rbqlen, 1,
			sc->rbqlen, MAX(16384, PAGE_SIZE), BUS_DMA_WAITOK,
			&sc->rbqpage_dmamap);
	}
	if (err == 0) {
		DPRINTFN(1, ("%s: -> bus_dmamap_load\n", __FUNCTION__));
		err = bus_dmamap_load(sc->sc_dmat, sc->rbqpage_dmamap,
			sc->rbqpage, sc->rbqlen, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0) {
		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));
	}
	sc->rbqpage_dsaddr = sc->rbqpage_dmamap->dm_segs[0].ds_addr;

	memset(sc->rbqpage, 0, sc->rbqlen);
	/* Set up pointers to start of each queue in kernel addr space.
	 * Each descriptor queue or status queue entry uses 2 words
	 */
	sc->RDSC = (void *)sc->rbqpage;

	/* Populate the RXQ with mbufs */
	sc->rxqi = 0;
	for (i = 0; i < RX_QLEN; i++) {
		struct mbuf *m;

		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, PAGE_SIZE,
			BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
		if (err) {
			panic("%s: dmamap_create failed: %i\n", __FUNCTION__, err);
		}
		MGETHDR(m, M_WAIT, MT_DATA);
		MCLGET(m, M_WAIT);
		sc->rxq[i].m = m;
		if (mtod(m, intptr_t) & 3) {
			m_adj(m, mtod(m, intptr_t) & 3);
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap,
			m->m_ext.ext_buf, MCLBYTES, NULL,
			BUS_DMA_WAITOK);
		if (err) {
			panic("%s: dmamap_load failed: %i\n", __FUNCTION__, err);
		}
		sc->RDSC[i].Addr = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr
			| (i == (RX_QLEN - 1) ? ETH_RDSC_F_WRAP : 0);
		sc->RDSC[i].Info = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
			MCLBYTES, BUS_DMASYNC_PREREAD);
	}

	/* prepare transmit queue */
	for (i = 0; i < TX_QLEN; i++) {
		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
					(BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW),
					&sc->txq[i].m_dmamap);
		if (err)
			panic("ARGH #1");
		sc->txq[i].m = NULL;
	}

	/* Program each queue's start addr, cur addr, and len registers
	 * with the physical addresses.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen,
			BUS_DMASYNC_PREREAD);
	addr = (void *)sc->rbqpage_dmamap->dm_segs[0].ds_addr;
	EMAC_WRITE(ETH_RBQP, (u_int32_t)addr);

	/* Divide HCLK by 32 for MDC clock */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = emac_mii_readreg;
	sc->sc_mii.mii_writereg = emac_mii_writereg;
	sc->sc_mii.mii_statchg = emac_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, emac_mediachange,
		emac_mediastatus);
	mii_attach((device_t)sc, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		MII_OFFSET_ANY, 0);
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	// enable / disable interrupts
#if 0
	// enable / disable interrupts
	EMAC_WRITE(ETH_IDR, -1);
	EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
		   | ETH_ISR_RBNA | ETH_ISR_ROVR);
	// (void)EMAC_READ(ETH_ISR); // why

	// enable transmitter / receiver
	EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
		   | ETH_CTL_CSR | ETH_CTL_MPE);
#endif
	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_ioctl = emac_ifioctl;
	ifp->if_start = emac_ifstart;
	ifp->if_watchdog = emac_ifwatchdog;
	ifp->if_init = emac_ifinit;
	ifp->if_stop = emac_ifstop;
	ifp->if_timer = 0;
	ifp->if_softc = sc;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	ether_ifattach(ifp, (sc)->sc_enaddr);
}
static int
emac_mediachange(struct ifnet *ifp)
{
	if (ifp->if_flags & IFF_UP)
		emac_ifinit(ifp);
	return (0);
}
static void
emac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emac_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}
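
/*
 * MII access: PHY registers are read and written through the ETH_MAN
 * (PHY maintenance) register; completion is detected by polling the
 * IDLE bit in ETH_SR.
 */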
int
emac_mii_readreg(device_t self, int phy, int reg)
{
	struct emac_softc *sc;

	sc = (struct emac_softc *)self;
	EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_RD
			     | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
			     | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
			     | ETH_MAN_CODE_IEEE802_3));
	while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE)) ;
	return (EMAC_READ(ETH_MAN) & ETH_MAN_DATA);
}
void
emac_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct emac_softc *sc;

	sc = (struct emac_softc *)self;
	EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_WR
			     | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
			     | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
			     | ETH_MAN_CODE_IEEE802_3
			     | (val & ETH_MAN_DATA)));
	while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE)) ;
}
void
emac_statchg(device_t self)
{
	struct emac_softc *sc = (struct emac_softc *)self;
	u_int32_t reg;

	/*
	 * We must keep the MAC and the PHY in sync as
	 * to the status of full-duplex!
	 */
	reg = EMAC_READ(ETH_CFG);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		reg |= ETH_CFG_FD;
	else
		reg &= ~ETH_CFG_FD;
	EMAC_WRITE(ETH_CFG, reg);
}
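
/*
 * Once-a-second callout: accumulate collision and receive-miss counters,
 * reclaim completed transmissions, kick the transmitter if packets are
 * waiting and let the MII layer poll link state.
 */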
void
emac_tick(void *arg)
{
	struct emac_softc *sc = (struct emac_softc *)arg;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	int s;
	u_int32_t misses;

	ifp->if_collisions += EMAC_READ(ETH_SCOL) + EMAC_READ(ETH_MCOL);
	/* These misses are ok, they will happen if the RAM/CPU can't keep up */
	misses = EMAC_READ(ETH_DRFC);
	if (misses > 0)
		printf("%s: %d rx misses\n", device_xname(sc->sc_dev), misses);

	s = splnet();
	if (emac_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
		emac_ifstart(ifp);
	}
	splx(s);

	mii_tick(&sc->sc_mii);
	callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
}
static int
emac_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct emac_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				emac_setaddr(ifp);
			error = 0;
		}
	}
	splx(s);
	return error;
}
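
/*
 * Transmit start routine.  The transmit buffer register takes a single,
 * 4-byte-aligned segment, so mbuf chains that map to more than one
 * segment or to a misaligned address are copied into a fresh mbuf or
 * cluster before being handed to the hardware via ETH_TAR/ETH_TCR.
 */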
static void
emac_ifstart(struct ifnet *ifp)
{
	struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t *segs;
	int s, bi, err, nsegs;

	s = splnet();
start:
	if (emac_gctx(sc) == 0) {
		/* Enable transmit-buffer-free interrupt */
		EMAC_WRITE(ETH_IER, ETH_ISR_TBRE);
		ifp->if_flags |= IFF_OACTIVE;
		ifp->if_timer = 10;
		splx(s);
		return;
	}

	ifp->if_timer = 0;

	IFQ_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return;
	}
	//more:
	bi = (sc->txqi + sc->txqc) % TX_QLEN;
	if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
		BUS_DMA_NOWAIT)) ||
	    sc->txq[bi].m_dmamap->dm_segs[0].ds_addr & 0x3 ||
	    sc->txq[bi].m_dmamap->dm_nsegs > 1) {
		/* Copy entire mbuf chain to new single */
		struct mbuf *mn;

		if (err == 0)
			bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);

		MGETHDR(mn, M_DONTWAIT, MT_DATA);
		if (mn == NULL) goto stop;
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(mn);
				goto stop;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(mn, void *));
		mn->m_pkthdr.len = mn->m_len = m->m_pkthdr.len;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		m_freem(m);
		m = mn;
		bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
			BUS_DMA_NOWAIT);
	} else {
		IFQ_DEQUEUE(&ifp->if_snd, m);
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

	nsegs = sc->txq[bi].m_dmamap->dm_nsegs;
	segs = sc->txq[bi].m_dmamap->dm_segs;
	if (nsegs > 1) {
		panic("#### ARGH #2");
	}

	sc->txq[bi].m = m;
	sc->txqc++;

	DPRINTFN(2, ("%s: start sending idx #%i mbuf %p (txqc=%i, phys %p), len=%u\n",
	    __FUNCTION__, bi, sc->txq[bi].m, sc->txqc, (void *)segs->ds_addr,
	    (unsigned)m->m_pkthdr.len));
#ifdef DIAGNOSTIC
	if (sc->txqc > TX_QLEN) {
		panic("%s: txqc %i > %i", __FUNCTION__, sc->txqc, TX_QLEN);
	}
#endif

	bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
		sc->txq[bi].m_dmamap->dm_mapsize,
		BUS_DMASYNC_PREWRITE);

	EMAC_WRITE(ETH_TAR, segs->ds_addr);
	EMAC_WRITE(ETH_TCR, m->m_pkthdr.len);
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		goto start;
stop:

	splx(s);
	return;
}
static void
emac_ifwatchdog(struct ifnet *ifp)
{
	struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	printf("%s: device timeout, CTL = 0x%08x, CFG = 0x%08x\n",
	    device_xname(sc->sc_dev), EMAC_READ(ETH_CTL), EMAC_READ(ETH_CFG));
}
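
/*
 * Bring the interface up: enable receive/transmit interrupts, enable the
 * transmitter and receiver, set the media and start the statistics callout.
 */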
static int
emac_ifinit(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	int s = splnet();

	callout_stop(&sc->emac_tick_ch);

	// enable interrupts
	EMAC_WRITE(ETH_IDR, -1);
	EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
		   | ETH_ISR_RBNA | ETH_ISR_ROVR);

	// enable transmitter / receiver
	EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
		   | ETH_CTL_CSR | ETH_CTL_MPE);

	mii_mediachg(&sc->sc_mii);
	callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
	ifp->if_flags |= IFF_RUNNING;
	splx(s);
	return 0;
}
static void
emac_ifstop(struct ifnet *ifp, int disable)
{
	// u_int32_t u;
	struct emac_softc *sc = ifp->if_softc;

#if 0
	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
	// EMAC_WRITE(ETH_RBQP, 0);		// clear receive
	EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_TCR, 0);			// send nothing
	// (void)EMAC_READ(ETH_ISR);
	u = EMAC_READ(ETH_TSR);
	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
				  | ETH_TSR_IDLE | ETH_TSR_RLE
				  | ETH_TSR_COL | ETH_TSR_OVR)));
	u = EMAC_READ(ETH_RSR);
	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));
#endif
	callout_stop(&sc->emac_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	sc->sc_mii.mii_media_status &= ~IFM_ACTIVE;
}
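
/*
 * Program the receive address filters: the station address, up to three
 * additional perfect-match entries for multicast groups, the hash filter
 * for anything beyond that, and copy-all-frames mode for promiscuous or
 * all-multicast operation.
 */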
static void
emac_setaddr(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct ethercom *ac = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t ias[3][ETHER_ADDR_LEN];
	u_int32_t h, nma = 0, hashes[2] = { 0, 0 };
	u_int32_t ctl = EMAC_READ(ETH_CTL);
	u_int32_t cfg = EMAC_READ(ETH_CFG);

	/* disable receiver temporarily */
	EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);

	cfg &= ~(ETH_CFG_MTI | ETH_CFG_UNI | ETH_CFG_CAF);

	if (ifp->if_flags & IFF_PROMISC) {
		cfg |= ETH_CFG_CAF;
	} else {
		cfg &= ~ETH_CFG_CAF;
	}

	// ETH_CFG_BIG?

	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			cfg |= ETH_CFG_CAF;
			hashes[0] = 0xffffffffUL;
			hashes[1] = 0xffffffffUL;
			ifp->if_flags |= IFF_ALLMULTI;
			nma = 0;
			break;
		}

		if (nma < 3) {
			/* We can program 3 perfect address filters for mcast */
			memcpy(ias[nma], enm->enm_addrlo, ETHER_ADDR_LEN);
		} else {
			/*
			 * XXX: Datasheet is not very clear here, I'm not sure
			 * if I'm doing this right. --joff
			 */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Just want the 6 most-significant bits. */
			h = h >> 26;

			hashes[h / 32] |= (1 << (h % 32));
			cfg |= ETH_CFG_MTI;
		}
		ETHER_NEXT_MULTI(step, enm);
		nma++;
	}

	// program...
	DPRINTFN(1, ("%s: en0 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
	    sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
	    sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]));
	EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
	    | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
	    | (sc->sc_enaddr[0]));
	EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
	    | (sc->sc_enaddr[4]));
	if (nma > 1) {
		DPRINTFN(1, ("%s: en1 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
		    ias[0][0], ias[0][1], ias[0][2],
		    ias[0][3], ias[0][4], ias[0][5]));
		EMAC_WRITE(ETH_SA2L, (ias[0][3] << 24)
		    | (ias[0][2] << 16) | (ias[0][1] << 8)
		    | (ias[0][0]));
		EMAC_WRITE(ETH_SA2H, (ias[0][4] << 8)
		    | (ias[0][5]));
	}
	if (nma > 2) {
		DPRINTFN(1, ("%s: en2 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
		    ias[1][0], ias[1][1], ias[1][2],
		    ias[1][3], ias[1][4], ias[1][5]));
		EMAC_WRITE(ETH_SA3L, (ias[1][3] << 24)
		    | (ias[1][2] << 16) | (ias[1][1] << 8)
		    | (ias[1][0]));
		EMAC_WRITE(ETH_SA3H, (ias[1][4] << 8)
		    | (ias[1][5]));
	}
	if (nma > 3) {
		DPRINTFN(1, ("%s: en3 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
		    ias[2][0], ias[2][1], ias[2][2],
		    ias[2][3], ias[2][4], ias[2][5]));
		/* third additional perfect-match address lives in SA4 */
		EMAC_WRITE(ETH_SA4L, (ias[2][3] << 24)
		    | (ias[2][2] << 16) | (ias[2][1] << 8)
		    | (ias[2][0]));
		EMAC_WRITE(ETH_SA4H, (ias[2][4] << 8)
		    | (ias[2][5]));
	}
	EMAC_WRITE(ETH_HSH, hashes[0]);
	EMAC_WRITE(ETH_HSL, hashes[1]);
	EMAC_WRITE(ETH_CFG, cfg);
	EMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);
}