/* $NetBSD: mtd803.c,v 1.21 2009/03/14 21:04:20 dsl Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Peter Bex <Peter.Bex@student.kun.nl>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 * - Most importantly, get some bus_dmamap_syncs in the correct places.
 *   I don't have access to a computer with PCI other than i386, and i386
 *   is just such a machine where dmamap_syncs don't do anything.
 * - Powerhook for when resuming after standby.
 * - Watchdog stuff doesn't work yet, the system crashes.
 * - There seems to be a CardBus version of the card. (see datasheet)
 *   Perhaps a detach function is necessary then? (free buffs, stop rx/tx etc)
 * - When you enable the TXBUN (Tx buffer unavailable) interrupt, it gets
 *   raised every time a packet is sent. Strange, since everything works anyway
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.21 2009/03/14 21:04:20 dsl Exp $");

#include "rnd.h"		/* defines NRND, used by the rnd hooks below */
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#if NRND > 0
#include <sys/rnd.h>		/* rnd_attach_source() and friends */
#endif

#include <sys/bus.h>

#include <dev/ic/mtd803reg.h>
#include <dev/ic/mtd803var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
/*
 * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
 * Written by Peter Bex (peter.bex@student.kun.nl)
 *
 * Datasheet at: http://www.myson.com.tw or http://www.century-semi.com
 */
#define MTD_READ_1(sc, reg) \
	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_1(sc, reg, data) \
	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_2(sc, reg) \
	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_2(sc, reg, data) \
	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_4(sc, reg, data) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_SETBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
#define MTD_CLRBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))

#define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))
int	mtd_mii_readreg(device_t, int, int);
void	mtd_mii_writereg(device_t, int, int, int);
void	mtd_mii_statchg(device_t);

void	mtd_start(struct ifnet *);
void	mtd_stop(struct ifnet *, int);
int	mtd_ioctl(struct ifnet *, u_long, void *);
void	mtd_setmulti(struct mtd_softc *);
void	mtd_watchdog(struct ifnet *);

int	mtd_init(struct ifnet *);
void	mtd_reset(struct mtd_softc *);
void	mtd_shutdown(void *);
int	mtd_init_desc(struct mtd_softc *);
int	mtd_put(struct mtd_softc *, int, struct mbuf *);
struct mbuf *mtd_get(struct mtd_softc *, int, int);

int	mtd_rxirq(struct mtd_softc *);
int	mtd_txirq(struct mtd_softc *);
int	mtd_bufirq(struct mtd_softc *);
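
/*
 * mtd_config() is the chip-independent half of attachment. The bus front
 * end (PCI, and apparently a CardBus variant; see the TODO above) is
 * presumably expected to map the registers, fill in bus_tag, bus_handle
 * and dma_tag in the softc, and then call this to read the station
 * address, hook up ifnet and MII, and set up the DMA rings.
 */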
int
mtd_config(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, device_xname(&sc->dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	sc->ethercom.ec_mii = &sc->mii;
	ifmedia_init(&sc->mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(&sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		aprint_error_dev(&sc->dev, "Unable to configure MII\n");
		return 1;
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

#if NRND > 0
	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, device_xname(&sc->dev),
	    RND_TYPE_NET, 0);
#endif

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}
/*
 * mtd_init
 * Must be called at splnet()
 */
int
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length. Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/* Set descriptor base addresses */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
	    + sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
	    htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
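
/*
 * The DMA setup below follows the usual bus_dma(9) four-step for each
 * region: bus_dmamem_alloc() -> bus_dmamem_map() -> bus_dmamap_create() ->
 * bus_dmamap_load(). Two regions are used: one segment holding all
 * MTD_NUM_RXD rx descriptors followed by all MTD_NUM_TXD tx descriptors,
 * and one segment holding the rx buffers followed by the tx buffers.
 * Descriptor i always uses buffer i, so no extra bookkeeping is needed.
 */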
int
mtd_init_desc(struct mtd_softc *sc)
{
	int rseg, desc_rseg, err, i;
	bus_dma_segment_t seg, desc_seg;
	bus_size_t size, desc_size;

	/* Allocate memory for descriptors */
	desc_size = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, desc_size, MTD_DMA_ALIGN,
	    0, &desc_seg, 1, &desc_rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to allocate DMA buffer, error = %d\n", err);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &desc_seg, 1, desc_size,
	    (void **)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to map DMA buffer, error = %d\n", err);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, desc_size, 1,
	    desc_size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to create DMA map, error = %d\n", err);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
	    desc_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to load DMA map, error = %d\n", err);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}
	/* Allocate memory for the buffers */
	size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to allocate DMA buffer, error = %d\n", err);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
	    &sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to map DMA buffer, error = %d\n", err);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
	    size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to create DMA map, error = %d\n", err);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to load DMA map, error = %d\n", err);
		bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}
	/* Descriptors are stored as a circular linked list */
	/* Fill in rx descriptors */
	for (i = 0; i < MTD_NUM_RXD; ++i) {
		sc->desc[i].stat = MTD_RXD_OWNER;
		if (i == MTD_NUM_RXD - 1) {	/* Last descriptor */
			/* Link back to first rx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
		} else {
			/* Link forward to next rx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + (i + 1) * sizeof(struct mtd_desc));
		}
		sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
		    + i * MTD_RXBUF_SIZE);
	}

	/* Fill in tx descriptors */
	for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
		sc->desc[i].stat = 0;	/* At least, NOT MTD_TXD_OWNER! */
		if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) {	/* Last descr */
			/* Link back to first tx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + MTD_NUM_RXD * sizeof(struct mtd_desc));
		} else {
			/* Link forward to next tx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + (i + 1) * sizeof(struct mtd_desc));
		}
		/* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
		    + MTD_NUM_RXD * MTD_RXBUF_SIZE
		    + (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
	}

	return 0;
}
void
mtd_mii_statchg(device_t self)
{
	/* Should we do something here? :) */
}
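
/*
 * The MII registers of the (apparently internal) PHY are memory-mapped
 * starting at MTD_PHYBASE, one 16-bit register every two bytes, so no MDIO
 * bit-banging is needed; the phy argument is ignored for the same reason.
 */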
int
mtd_mii_readreg(device_t self, int phy, int reg)
{
	struct mtd_softc *sc = device_private(self);

	return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2));
}
void
mtd_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct mtd_softc *sc = device_private(self);

	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
}
int
mtd_put(struct mtd_softc *sc, int index, struct mbuf *m)
{
	int len, tlen;
	char *buf = (char *)sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
	    + index * MTD_TXBUF_SIZE;
	struct mbuf *n;

	for (tlen = 0; m != NULL; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		} else if (tlen + len > MTD_TXBUF_SIZE) {
			/*
			 * Copying this mbuf would overrun the tx buffer;
			 * drop it rather than write past the end.
			 * XXX FIXME: No idea what else to do here.
			 */
			aprint_error_dev(&sc->dev,
			    "packet too large! Size = %i\n", tlen + len);
			MFREE(m, n);
			continue;
		}
		memcpy(buf, mtod(m, void *), len);
		buf += len;
		tlen += len;
		MFREE(m, n);
	}

	sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
	    | MTD_TXD_CONF_IRQC
	    | ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
	    | (tlen & MTD_TXD_CONF_BUFS);

	return tlen;
}
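
/*
 * Transmit path: mtd_start() copies each packet into the tx buffer of the
 * next free descriptor, marks the first descriptor of the chain with
 * MTD_TXD_CONF_FSD and the last with MTD_TXD_CONF_LSD, and hands ownership
 * (MTD_TXD_OWNER) of the *first* descriptor to the chip only after the
 * whole chain is set up, so the chip never sees a half-built chain.
 */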
void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int len;
	int first_tx = sc->cur_tx;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Copy mbuf chain into tx buffer */
		len = mtd_put(sc, sc->cur_tx, m);

		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;

		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}

	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	if (sc->cur_tx == 0) {
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |= MTD_TXD_CONF_LSD;
	} else {
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
	}

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}
void
mtd_stop(struct ifnet *ifp, int disable)
{
	struct mtd_softc *sc = ifp->if_softc;

	/* Disable transmitter and receiver */
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	/* Must do more at disable??... */
	if (disable) {
		/* Delete tx and rx descriptor base addresses */
		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->dev));
	++sc->ethercom.ec_if.if_oerrors;

	mtd_stop(ifp, 0);

	s = splnet();
	mtd_init(ifp);
	splx(s);

	return;
}
int
mtd_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			mtd_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}
struct mbuf *
mtd_get(struct mtd_softc *sc, int index, int totlen)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	char *buf = (char *)sc->buf + index * MTD_RXBUF_SIZE;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

		if (m == m0) {
			char *newdata = (char *)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}
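
/*
 * Receive path: the chip clears MTD_RXD_OWNER on every descriptor it has
 * filled. mtd_rxirq() walks the ring while the owner bit is clear, copies
 * good packets out with mtd_get(), and recycles each descriptor by
 * restoring its buffer size and owner bit before moving on.
 */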
int
mtd_rxirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	for (; !(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER);) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			aprint_error_dev(&sc->dev,
			    "received packet with errors\n");
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		/* Get buffer length */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
		    >> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size */
		if (len <= sizeof(struct ether_header)) {
			aprint_error_dev(&sc->dev,
			    "invalid packet size %d; dropping\n", len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		m = mtd_get(sc, (sc->cur_rx), len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			aprint_error_dev(&sc->dev,
			    "error pulling packet off interface\n");
			++ifp->if_ierrors;
			continue;
		}

		++ifp->if_ipackets;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass the packet up */
		(*ifp->if_input)(ifp, m);
	}

	return 1;
}
int
mtd_txirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	ifp->if_flags &= ~IFF_OACTIVE;
	++ifp->if_opackets;

	/* XXX FIXME If there is some queued, do an mtd_start? */

	return 1;
}
int
mtd_bufirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	/* XXX FIXME: Do something here to make sure we get some buffers! */

	return 1;
}
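
/*
 * Interrupt handler. Interrupts are masked via MTD_IMR while the handler
 * drains MTD_ISR; writing the status bits back to MTD_ISR appears to
 * acknowledge them. The raw status is also fed to rnd(4) as an entropy
 * source before being masked.
 */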
int
mtd_irq_h(void *args)
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || !device_is_active(&sc->dev))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for (;;) {
		status = MTD_READ_4(sc, MTD_ISR);
#if NRND > 0
		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);
#endif
		status &= MTD_ISR_MASK;
		if (!status)		/* We didn't ask for this */
			break;

		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			aprint_error_dev(&sc->dev,
			    "receive buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			aprint_error_dev(&sc->dev, "receive error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TXBUN) {
			aprint_error_dev(&sc->dev,
			    "transmit buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PDF) {
			aprint_error_dev(&sc->dev,
			    "parallel detection fault\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			aprint_error_dev(&sc->dev, "fatal bus error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			aprint_error_dev(&sc->dev, "target error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			aprint_error_dev(&sc->dev, "master error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			aprint_error_dev(&sc->dev, "parity error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);
	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}
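
/*
 * Multicast filtering uses a 64-bit hash: the top 6 bits of the big-endian
 * CRC32 of each multicast address select one bit across the MAR0/MAR1
 * register pair. IFF_ALLMULTI and IFF_PROMISC simply set every bit.
 */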
void
mtd_setmulti(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t rxtx_stat;
	u_int32_t hash[2] = {0, 0};
	u_int32_t crc;
	struct ether_multi *enm;
	struct ether_multistep step;
	int mcnt = 0;

	/* Get old status */
	rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);

	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
		rxtx_stat |= MTD_RX_AMULTI;
		MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
		MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
		MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
		return;
	}

	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
	while (enm != NULL) {
		/* We need the 6 most significant bits of the CRC */
		crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;

		/* Top bit picks the register, low 5 bits pick the bit */
		hash[crc >> 5] |= 1 << (crc & 0x1f);

		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Accept multicast bit needs to be on? */
	if (mcnt)
		rxtx_stat |= MTD_RX_AMULTI;
	else
		rxtx_stat &= ~MTD_RX_AMULTI;

	/* Write out the hash */
	MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
	MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
	MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
}
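
/*
 * Software reset: set MTD_BCR_RESET and poll (up to MTD_TIMEOUT times,
 * 10us apart) for the chip to clear it again, then give it another
 * millisecond to settle.
 */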
void
mtd_reset(struct mtd_softc *sc)
{
	int i;

	MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);

	/* Reset descriptor status */
	sc->cur_tx = 0;
	sc->cur_rx = 0;

	/* Wait until done with reset */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
			break;
	}

	if (i == MTD_TIMEOUT) {
		aprint_error_dev(&sc->dev, "reset timed out\n");
	}

	/* Wait a little so chip can stabilize */
	DELAY(1000);
}
void
mtd_shutdown(void *arg)
{
	struct mtd_softc *sc = arg;
	struct ifnet *ifp = &sc->ethercom.ec_if;

#if NRND > 0
	rnd_detach_source(&sc->rnd_src);
#endif
	mtd_stop(ifp, 1);
}