1 /* $NetBSD: if_mec.c,v 1.40 2009/11/27 11:04:18 tsutsui Exp $ */
4 * Copyright (c) 2004, 2008 Izumi Tsutsui. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Copyright (c) 2003 Christopher SEKIYA
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 3. All advertising materials mentioning features or use of this software
40 * must display the following acknowledgement:
41 * This product includes software developed for the
42 * NetBSD Project. See http://www.NetBSD.org/ for
43 * information about NetBSD.
44 * 4. The name of the author may not be used to endorse or promote products
45 * derived from this software without specific prior written permission.
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 * MACE MAC-110 Ethernet driver
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.40 2009/11/27 11:04:18 tsutsui Exp $");
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/device.h>
73 #include <sys/callout.h>
75 #include <sys/malloc.h>
76 #include <sys/kernel.h>
77 #include <sys/socket.h>
78 #include <sys/ioctl.h>
79 #include <sys/errno.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88 #include <net/if_ether.h>
90 #include <netinet/in.h>
91 #include <netinet/in_systm.h>
92 #include <netinet/ip.h>
93 #include <netinet/tcp.h>
94 #include <netinet/udp.h>
100 #include <machine/bus.h>
101 #include <machine/intr.h>
102 #include <machine/machtype.h>
104 #include <dev/mii/mii.h>
105 #include <dev/mii/miivar.h>
107 #include <sgimips/mace/macevar.h>
108 #include <sgimips/mace/if_mecreg.h>
110 #include <dev/arcbios/arcbios.h>
111 #include <dev/arcbios/arcbiosvar.h>
/*
 * Debug knobs, optional event-counter macro, and TX descriptor ring
 * geometry.
 * NOTE(review): this chunk is a lossy extraction — the #ifdef MEC_DEBUG /
 * #else / #endif lines that must separate the two DPRINTF definitions
 * (and the #else for MEC_EVCNT_INCR) are missing from this view.
 */
113 /* #define MEC_DEBUG */
/* Per-subsystem debug bits tested by DPRINTF() against mec_debug. */
116 #define MEC_DEBUG_RESET 0x01
117 #define MEC_DEBUG_START 0x02
118 #define MEC_DEBUG_STOP 0x04
119 #define MEC_DEBUG_INTR 0x08
120 #define MEC_DEBUG_RXINTR 0x10
121 #define MEC_DEBUG_TXINTR 0x20
122 #define MEC_DEBUG_TXSEGS 0x40
/* Runtime-patchable mask of enabled MEC_DEBUG_* bits (debug build only). */
123 uint32_t mec_debug
= 0;
124 #define DPRINTF(x, y) if (mec_debug & (x)) printf y
/* Non-debug build: DPRINTF compiles away entirely. */
126 #define DPRINTF(x, y) /* nothing */
129 /* #define MEC_EVENT_COUNTERS */
131 #ifdef MEC_EVENT_COUNTERS
132 #define MEC_EVCNT_INCR(ev) (ev)->ev_count++
/* Counters disabled: expand to a statement-shaped no-op. */
134 #define MEC_EVCNT_INCR(ev) do {} while (/* CONSTCOND */ 0)
138 * Transmit descriptor list size
/* 64-entry TX ring; MASK/NEXTTX rely on the size being a power of two. */
140 #define MEC_NTXDESC 64
141 #define MEC_NTXDESC_MASK (MEC_NTXDESC - 1)
142 #define MEC_NEXTTX(x) (((x) + 1) & MEC_NTXDESC_MASK)
143 #define MEC_NTXDESC_RSVD 4
144 #define MEC_NTXDESC_INTR 8
147 * software state for TX
/*
 * Fields of the per-packet TX software state (struct mec_txsoft).
 * NOTE(review): the "struct mec_txsoft {" opening line is missing from
 * this extraction.
 */
150 struct mbuf
*txs_mbuf
; /* head of our mbuf chain */
151 bus_dmamap_t txs_dmamap
; /* our DMA map */
153 #define MEC_TXS_BUFLEN_MASK 0x0000007f /* data len in txd_buf */
154 #define MEC_TXS_TXDPTR 0x00000080 /* concat txd_ptr is used */
158 * Transmit buffer descriptor
/* Each TX descriptor occupies exactly 128 bytes (cmd word + data area). */
160 #define MEC_TXDESCSIZE 128
162 #define MEC_TXD_BUFOFFSET sizeof(uint64_t)
163 #define MEC_TXD_BUFOFFSET1 \
164 (sizeof(uint64_t) + sizeof(uint64_t) * MEC_NTXPTR)
165 #define MEC_TXD_BUFSIZE (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
166 #define MEC_TXD_BUFSIZE1 (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET1)
/* In-descriptor data is packed at the END of the buffer area. */
167 #define MEC_TXD_BUFSTART(len) (MEC_TXD_BUFSIZE - (len))
168 #define MEC_TXD_ALIGN 8
169 #define MEC_TXD_ALIGNMASK (MEC_TXD_ALIGN - 1)
170 #define MEC_TXD_ROUNDUP(addr) \
171 (((addr) + MEC_TXD_ALIGNMASK) & ~(uint64_t)MEC_TXD_ALIGNMASK)
172 #define MEC_NTXSEG 16
/*
 * txd_cmd: command word written by software; volatile because the
 * same location is re-read as hardware TX status (txd_stat alias below).
 */
175 volatile uint64_t txd_cmd
;
176 #define MEC_TXCMD_DATALEN 0x000000000000ffff /* data length */
177 #define MEC_TXCMD_BUFSTART 0x00000000007f0000 /* start byte offset */
178 #define TXCMD_BUFSTART(x) ((x) << 16)
179 #define MEC_TXCMD_TERMDMA 0x0000000000800000 /* stop DMA on abort */
180 #define MEC_TXCMD_TXINT 0x0000000001000000 /* INT after TX done */
181 #define MEC_TXCMD_PTR1 0x0000000002000000 /* valid 1st txd_ptr */
182 #define MEC_TXCMD_PTR2 0x0000000004000000 /* valid 2nd txd_ptr */
183 #define MEC_TXCMD_PTR3 0x0000000008000000 /* valid 3rd txd_ptr */
184 #define MEC_TXCMD_UNUSED 0xfffffffff0000000ULL /* should be zero */
/* Status view of the same 64-bit word once the hardware has processed it. */
186 #define txd_stat txd_cmd
187 #define MEC_TXSTAT_LEN 0x000000000000ffff /* TX length */
188 #define MEC_TXSTAT_COLCNT 0x00000000000f0000 /* collision count */
189 #define MEC_TXSTAT_COLCNT_SHIFT 16
190 #define MEC_TXSTAT_LATE_COL 0x0000000000100000 /* late collision */
191 #define MEC_TXSTAT_CRCERROR 0x0000000000200000 /* */
192 #define MEC_TXSTAT_DEFERRED 0x0000000000400000 /* */
193 #define MEC_TXSTAT_SUCCESS 0x0000000000800000 /* TX complete */
194 #define MEC_TXSTAT_TOOBIG 0x0000000001000000 /* */
195 #define MEC_TXSTAT_UNDERRUN 0x0000000002000000 /* */
196 #define MEC_TXSTAT_COLLISIONS 0x0000000004000000 /* */
197 #define MEC_TXSTAT_EXDEFERRAL 0x0000000008000000 /* */
198 #define MEC_TXSTAT_COLLIDED 0x0000000010000000 /* */
199 #define MEC_TXSTAT_UNUSED 0x7fffffffe0000000ULL /* should be zero */
200 #define MEC_TXSTAT_SENT 0x8000000000000000ULL /* packet sent */
/* Optional concatenation pointers (external DMA segments). */
203 uint64_t txptr
[MEC_NTXPTR
];
204 #define MEC_TXPTR_UNUSED2 0x0000000000000007 /* should be zero */
205 #define MEC_TXPTR_DMAADDR 0x00000000fffffff8 /* TX DMA address */
206 #define MEC_TXPTR_LEN 0x0000ffff00000000ULL /* buffer length */
207 #define TXPTR_LEN(x) ((uint64_t)(x) << 32)
208 #define MEC_TXPTR_UNUSED1 0xffff000000000000ULL /* should be zero */
/* In-descriptor data buffer (shares space with txptr via a union). */
210 uint8_t txbuf
[MEC_TXD_BUFSIZE
];
212 #define txd_ptr txd_data.txptr
213 #define txd_buf txd_data.txbuf
217 * Receive buffer size
/* 16-entry RX ring; MASK/NEXTRX rely on the power-of-two size. */
219 #define MEC_NRXDESC 16
220 #define MEC_NRXDESC_MASK (MEC_NRXDESC - 1)
221 #define MEC_NEXTRX(x) (((x) + 1) & MEC_NRXDESC_MASK)
224 * Receive buffer description
226 #define MEC_RXDESCSIZE 4096 /* umm, should be 4kbyte aligned */
227 #define MEC_RXD_NRXPAD 3
228 #define MEC_RXD_DMAOFFSET (1 + MEC_RXD_NRXPAD)
229 #define MEC_RXD_BUFOFFSET (MEC_RXD_DMAOFFSET * sizeof(uint64_t))
230 #define MEC_RXD_BUFSIZE (MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)
/*
 * RX status word written by hardware; volatile so polled reads are not
 * cached by the compiler.
 * NOTE(review): the "struct mec_rxdesc {" opening line is missing from
 * this extraction.
 */
233 volatile uint64_t rxd_stat
;
234 #define MEC_RXSTAT_LEN 0x000000000000ffff /* data length */
235 #define MEC_RXSTAT_VIOLATION 0x0000000000010000 /* code violation (?) */
236 #define MEC_RXSTAT_UNUSED2 0x0000000000020000 /* unknown (?) */
237 #define MEC_RXSTAT_CRCERROR 0x0000000000040000 /* CRC error */
238 #define MEC_RXSTAT_MULTICAST 0x0000000000080000 /* multicast packet */
239 #define MEC_RXSTAT_BROADCAST 0x0000000000100000 /* broadcast packet */
240 #define MEC_RXSTAT_INVALID 0x0000000000200000 /* invalid preamble */
241 #define MEC_RXSTAT_LONGEVENT 0x0000000000400000 /* long packet */
242 #define MEC_RXSTAT_BADPACKET 0x0000000000800000 /* bad packet */
243 #define MEC_RXSTAT_CAREVENT 0x0000000001000000 /* carrier event */
244 #define MEC_RXSTAT_MATCHMCAST 0x0000000002000000 /* match multicast */
245 #define MEC_RXSTAT_MATCHMAC 0x0000000004000000 /* match MAC */
246 #define MEC_RXSTAT_SEQNUM 0x00000000f8000000 /* sequence number */
247 #define MEC_RXSTAT_CKSUM 0x0000ffff00000000ULL /* IP checksum */
248 #define RXSTAT_CKSUM(x) (((uint64_t)(x) & MEC_RXSTAT_CKSUM) >> 32)
249 #define MEC_RXSTAT_UNUSED1 0x7fff000000000000ULL /* should be zero */
250 #define MEC_RXSTAT_RECEIVED 0x8000000000000000ULL /* set to 1 on RX */
/* Pad words so the data buffer starts at MEC_RXD_BUFOFFSET. */
251 uint64_t rxd_pad1
[MEC_RXD_NRXPAD
];
/* Received frame data; fills the rest of the 4KB descriptor. */
252 uint8_t rxd_buf
[MEC_RXD_BUFSIZE
];
256 * control structures for DMA ops
/*
 * All DMA-visible control data (TX and RX rings) in one allocation so a
 * single bus_dma map covers everything; offsets computed via MEC_CDOFF.
 */
258 struct mec_control_data
{
260 * TX descriptors and buffers
262 struct mec_txdesc mcd_txdesc
[MEC_NTXDESC
];
265 * RX descriptors and buffers
267 struct mec_rxdesc mcd_rxdesc
[MEC_NRXDESC
];
271 * It _seems_ there are some restrictions on descriptor address:
273 * - Base address of txdescs should be 8kbyte aligned
274 * - Each txdesc should be 128byte aligned
275 * - Each rxdesc should be 4kbyte aligned
277 * So we should specify 8k align to allocate txdescs.
278 * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192
279 * so rxdescs are also allocated at 4kbyte aligned.
281 #define MEC_CONTROL_DATA_ALIGN (8 * 1024)
/* Byte offsets of descriptor x within the control-data block. */
283 #define MEC_CDOFF(x) offsetof(struct mec_control_data, x)
284 #define MEC_CDTXOFF(x) MEC_CDOFF(mcd_txdesc[(x)])
285 #define MEC_CDRXOFF(x) MEC_CDOFF(mcd_rxdesc[(x)])
288 * software state per device
/*
 * Per-device softc.
 * NOTE(review): the "struct mec_softc {" opening line is missing from
 * this extraction.
 */
291 device_t sc_dev
; /* generic device structures */
293 bus_space_tag_t sc_st
; /* bus_space tag */
294 bus_space_handle_t sc_sh
; /* bus_space handle */
295 bus_dma_tag_t sc_dmat
; /* bus_dma tag */
297 struct ethercom sc_ethercom
; /* Ethernet common part */
299 struct mii_data sc_mii
; /* MII/media information */
300 int sc_phyaddr
; /* MII address */
301 struct callout sc_tick_ch
; /* tick callout */
303 uint8_t sc_enaddr
[ETHER_ADDR_LEN
]; /* MAC address */
305 bus_dmamap_t sc_cddmamap
; /* bus_dma map for control data */
/* DMA bus address of the control-data block (first segment). */
306 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
308 /* pointer to allocated control data */
309 struct mec_control_data
*sc_control_data
;
/* Shorthand accessors for the rings inside the control data. */
310 #define sc_txdesc sc_control_data->mcd_txdesc
311 #define sc_rxdesc sc_control_data->mcd_rxdesc
313 /* software state for TX descs */
314 struct mec_txsoft sc_txsoft
[MEC_NTXDESC
];
316 int sc_txpending
; /* number of TX requests pending */
317 int sc_txdirty
; /* first dirty TX descriptor */
318 int sc_txlast
; /* last used TX descriptor */
320 int sc_rxptr
; /* next ready RX buffer */
323 rndsource_element_t sc_rnd_source
; /* random source */
/* Optional statistics; compiled in only with MEC_EVENT_COUNTERS. */
325 #ifdef MEC_EVENT_COUNTERS
326 struct evcnt sc_ev_txpkts
; /* TX packets queued total */
327 struct evcnt sc_ev_txdpad
; /* TX packets padded in txdesc buf */
328 struct evcnt sc_ev_txdbuf
; /* TX packets copied to txdesc buf */
329 struct evcnt sc_ev_txptr1
; /* TX packets using concat ptr1 */
330 struct evcnt sc_ev_txptr1a
; /* TX packets w/ptr1 ~160bytes */
331 struct evcnt sc_ev_txptr1b
; /* TX packets w/ptr1 ~256bytes */
332 struct evcnt sc_ev_txptr1c
; /* TX packets w/ptr1 ~512bytes */
333 struct evcnt sc_ev_txptr1d
; /* TX packets w/ptr1 ~1024bytes */
334 struct evcnt sc_ev_txptr1e
; /* TX packets w/ptr1 >1024bytes */
335 struct evcnt sc_ev_txptr2
; /* TX packets using concat ptr1,2 */
336 struct evcnt sc_ev_txptr2a
; /* TX packets w/ptr2 ~160bytes */
337 struct evcnt sc_ev_txptr2b
; /* TX packets w/ptr2 ~256bytes */
338 struct evcnt sc_ev_txptr2c
; /* TX packets w/ptr2 ~512bytes */
339 struct evcnt sc_ev_txptr2d
; /* TX packets w/ptr2 ~1024bytes */
340 struct evcnt sc_ev_txptr2e
; /* TX packets w/ptr2 >1024bytes */
341 struct evcnt sc_ev_txptr3
; /* TX packets using concat ptr1,2,3 */
342 struct evcnt sc_ev_txptr3a
; /* TX packets w/ptr3 ~160bytes */
343 struct evcnt sc_ev_txptr3b
; /* TX packets w/ptr3 ~256bytes */
344 struct evcnt sc_ev_txptr3c
; /* TX packets w/ptr3 ~512bytes */
345 struct evcnt sc_ev_txptr3d
; /* TX packets w/ptr3 ~1024bytes */
346 struct evcnt sc_ev_txptr3e
; /* TX packets w/ptr3 >1024bytes */
347 struct evcnt sc_ev_txmbuf
; /* TX packets copied to new mbufs */
348 struct evcnt sc_ev_txmbufa
; /* TX packets w/mbuf ~160bytes */
349 struct evcnt sc_ev_txmbufb
; /* TX packets w/mbuf ~256bytes */
350 struct evcnt sc_ev_txmbufc
; /* TX packets w/mbuf ~512bytes */
351 struct evcnt sc_ev_txmbufd
; /* TX packets w/mbuf ~1024bytes */
352 struct evcnt sc_ev_txmbufe
; /* TX packets w/mbuf >1024bytes */
353 struct evcnt sc_ev_txptrs
; /* TX packets using ptrs total */
354 struct evcnt sc_ev_txptrc0
; /* TX packets w/ptrs no hdr chain */
355 struct evcnt sc_ev_txptrc1
; /* TX packets w/ptrs 1 hdr chain */
356 struct evcnt sc_ev_txptrc2
; /* TX packets w/ptrs 2 hdr chains */
357 struct evcnt sc_ev_txptrc3
; /* TX packets w/ptrs 3 hdr chains */
358 struct evcnt sc_ev_txptrc4
; /* TX packets w/ptrs 4 hdr chains */
359 struct evcnt sc_ev_txptrc5
; /* TX packets w/ptrs 5 hdr chains */
360 struct evcnt sc_ev_txptrc6
; /* TX packets w/ptrs >5 hdr chains */
361 struct evcnt sc_ev_txptrh0
; /* TX packets w/ptrs ~8bytes hdr */
362 struct evcnt sc_ev_txptrh1
; /* TX packets w/ptrs ~16bytes hdr */
363 struct evcnt sc_ev_txptrh2
; /* TX packets w/ptrs ~32bytes hdr */
364 struct evcnt sc_ev_txptrh3
; /* TX packets w/ptrs ~64bytes hdr */
365 struct evcnt sc_ev_txptrh4
; /* TX packets w/ptrs ~80bytes hdr */
366 struct evcnt sc_ev_txptrh5
; /* TX packets w/ptrs ~96bytes hdr */
367 struct evcnt sc_ev_txdstall
; /* TX stalled due to no txdesc */
368 struct evcnt sc_ev_txempty
; /* TX empty interrupts */
369 struct evcnt sc_ev_txsent
; /* TX sent interrupts */
/* DMA bus address of TX/RX descriptor x. */
373 #define MEC_CDTXADDR(sc, x) ((sc)->sc_cddma + MEC_CDTXOFF(x))
374 #define MEC_CDRXADDR(sc, x) ((sc)->sc_cddma + MEC_CDRXOFF(x))
/* Sync a whole TX descriptor (cmd + data area). */
376 #define MEC_TXDESCSYNC(sc, x, ops) \
377 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
378 MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
/* Sync only the 64-bit TX command/status word. */
379 #define MEC_TXCMDSYNC(sc, x, ops) \
380 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
381 MEC_CDTXOFF(x), sizeof(uint64_t), (ops))
/* Sync only the 64-bit RX status word. */
383 #define MEC_RXSTATSYNC(sc, x, ops) \
384 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
385 MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
/* Sync len bytes of RX data (plus the 2-byte alignment slack). */
386 #define MEC_RXBUFSYNC(sc, x, len, ops) \
387 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
388 MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET, \
389 MEC_ETHER_ALIGN + (len), (ops))
391 /* XXX these values should be moved to <net/if_ether.h> ? */
392 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
393 #define MEC_ETHER_ALIGN 2
/* Forward declarations for the driver's internal entry points. */
395 static int mec_match(device_t
, cfdata_t
, void *);
396 static void mec_attach(device_t
, device_t
, void *);
/* MII bus accessors and link-state callback. */
398 static int mec_mii_readreg(device_t
, int, int);
399 static void mec_mii_writereg(device_t
, int, int, int);
400 static int mec_mii_wait(struct mec_softc
*);
401 static void mec_statchg(device_t
);
/* Parse a "xx:xx:xx:xx:xx:xx" string into a 6-byte MAC address. */
403 static void enaddr_aton(const char *, uint8_t *);
/* ifnet interface handlers. */
405 static int mec_init(struct ifnet
* ifp
);
406 static void mec_start(struct ifnet
*);
407 static void mec_watchdog(struct ifnet
*);
408 static void mec_tick(void *);
409 static int mec_ioctl(struct ifnet
*, u_long
, void *);
410 static void mec_reset(struct mec_softc
*);
411 static void mec_setfilter(struct mec_softc
*);
412 static int mec_intr(void *arg
);
413 static void mec_stop(struct ifnet
*, int);
414 static void mec_rxintr(struct mec_softc
*);
415 static void mec_rxcsum(struct mec_softc
*, struct mbuf
*, uint16_t,
417 static void mec_txintr(struct mec_softc
*, uint32_t);
418 static bool mec_shutdown(device_t
, int);
/* Autoconf glue: registers mec_match/mec_attach for the "mec" device. */
420 CFATTACH_DECL_NEW(mec
, sizeof(struct mec_softc
),
421 mec_match
, mec_attach
, NULL
, NULL
);
/* Set once a device has attached; used to allow only one instance. */
423 static int mec_matched
= 0;
/*
 * Autoconf match function.
 * NOTE(review): the function body (brace, return statements and the use
 * of mec_matched) is missing from this extraction — only the signature
 * and one comment are visible.
 */
426 mec_match(device_t parent
, cfdata_t cf
, void *aux
)
429 /* allow only one device */
/*
 * Attach: map registers, allocate/map/load the shared DMA control data,
 * create per-packet TX DMA maps, obtain the MAC address from ARCBIOS,
 * attach MII/ifnet/ether layers, and hook up the interrupt.
 * NOTE(review): this chunk is a lossy extraction; several declarations
 * (err, i, rseg, macaddr, ...), goto-based error labels and closing
 * braces are missing from this view.
 */
438 mec_attach(device_t parent
, device_t self
, void *aux
)
440 struct mec_softc
*sc
= device_private(self
);
441 struct mace_attach_args
*maa
= aux
;
442 struct ifnet
*ifp
= &sc
->sc_ethercom
.ec_if
;
443 uint64_t address
, command
;
445 struct mii_softc
*child
;
446 bus_dma_segment_t seg
;
/* Map the MEC register window as a subregion of the MACE space. */
451 sc
->sc_st
= maa
->maa_st
;
452 if (bus_space_subregion(sc
->sc_st
, maa
->maa_sh
,
453 maa
->maa_offset
, 0, &sc
->sc_sh
) != 0) {
454 aprint_error(": can't map i/o space\n");
458 /* set up DMA structures */
459 sc
->sc_dmat
= maa
->maa_dmat
;
462 * Allocate the control data structures, and create and load the
465 if ((err
= bus_dmamem_alloc(sc
->sc_dmat
,
466 sizeof(struct mec_control_data
), MEC_CONTROL_DATA_ALIGN
, 0,
467 &seg
, 1, &rseg
, BUS_DMA_NOWAIT
)) != 0) {
468 aprint_error(": unable to allocate control data, error = %d\n",
473 * XXX needs re-think...
474 * control data structures contain whole RX data buffer, so
475 * BUS_DMA_COHERENT (which disables cache) may cause some performance
476 * issue on copying data from the RX buffer to mbuf on normal memory,
477 * though we have to make sure all bus_dmamap_sync(9) ops are called
478 * properly in that case.
480 if ((err
= bus_dmamem_map(sc
->sc_dmat
, &seg
, rseg
,
481 sizeof(struct mec_control_data
),
482 (void **)&sc
->sc_control_data
, /*BUS_DMA_COHERENT*/ 0)) != 0) {
483 aprint_error(": unable to map control data, error = %d\n", err
);
486 memset(sc
->sc_control_data
, 0, sizeof(struct mec_control_data
));
/* One map covering the whole control-data block (single segment). */
488 if ((err
= bus_dmamap_create(sc
->sc_dmat
,
489 sizeof(struct mec_control_data
), 1,
490 sizeof(struct mec_control_data
), 0, 0, &sc
->sc_cddmamap
)) != 0) {
491 aprint_error(": unable to create control data DMA map,"
492 " error = %d\n", err
);
495 if ((err
= bus_dmamap_load(sc
->sc_dmat
, sc
->sc_cddmamap
,
496 sc
->sc_control_data
, sizeof(struct mec_control_data
), NULL
,
497 BUS_DMA_NOWAIT
)) != 0) {
498 aprint_error(": unable to load control data DMA map,"
499 " error = %d\n", err
);
503 /* create TX buffer DMA maps */
504 for (i
= 0; i
< MEC_NTXDESC
; i
++) {
505 if ((err
= bus_dmamap_create(sc
->sc_dmat
,
506 MCLBYTES
, MEC_NTXSEG
, MCLBYTES
, PAGE_SIZE
, 0,
507 &sc
->sc_txsoft
[i
].txs_dmamap
)) != 0) {
508 aprint_error(": unable to create tx DMA map %d,"
509 " error = %d\n", i
, err
);
514 callout_init(&sc
->sc_tick_ch
, 0);
516 /* get Ethernet address from ARCBIOS */
517 if ((macaddr
= ARCBIOS
->GetEnvironmentVariable("eaddr")) == NULL
) {
518 aprint_error(": unable to get MAC address!\n");
522 * On some machines the DS2502 chip storing the serial number/
523 * mac address is on the pci riser board - if this board is
524 * missing, ARCBIOS will not know a good ethernet address (but
525 * otherwise the machine will work fine).
528 if (strcmp(macaddr
, "ff:ff:ff:ff:ff:ff") == 0) {
530 const char * netaddr
=
531 ARCBIOS
->GetEnvironmentVariable("netaddr");
534 * Create a MAC address by abusing the "netaddr" env var
/* Locally-administered OUI f2:0b:a4 for the synthesized address. */
536 sc
->sc_enaddr
[0] = 0xf2;
537 sc
->sc_enaddr
[1] = 0x0b;
538 sc
->sc_enaddr
[2] = 0xa4;
/* Parse the leading decimal component of "netaddr" into v. */
543 while (*netaddr
&& *netaddr
!= '.') {
544 if (*netaddr
>= '0' && *netaddr
<= '9')
545 v
= v
*10 + (*netaddr
- '0');
554 memcpy(sc
->sc_enaddr
+3, ((uint8_t *)&ui
)+1, 3);
/* Normal case: parse the firmware-provided "eaddr" string. */
557 enaddr_aton(macaddr
, sc
->sc_enaddr
);
559 /* set the Ethernet address */
561 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++) {
562 address
= address
<< 8;
563 address
|= sc
->sc_enaddr
[i
];
565 bus_space_write_8(sc
->sc_st
, sc
->sc_sh
, MEC_STATION
, address
);
/* Report the MAC core revision read from MEC_MAC_CONTROL. */
570 command
= bus_space_read_8(sc
->sc_st
, sc
->sc_sh
, MEC_MAC_CONTROL
);
572 aprint_normal(": MAC-110 Ethernet, rev %u\n",
573 (u_int
)((command
& MEC_MAC_REVISION
) >> MEC_MAC_REVISION_SHIFT
));
576 aprint_normal_dev(self
,
577 "could not get ethernet address from firmware"
578 " - generated one from the \"netaddr\" environment"
580 aprint_normal_dev(self
, "Ethernet address %s\n",
581 ether_sprintf(sc
->sc_enaddr
));
583 /* Done, now attach everything */
585 sc
->sc_mii
.mii_ifp
= ifp
;
586 sc
->sc_mii
.mii_readreg
= mec_mii_readreg
;
587 sc
->sc_mii
.mii_writereg
= mec_mii_writereg
;
588 sc
->sc_mii
.mii_statchg
= mec_statchg
;
590 /* Set up PHY properties */
591 sc
->sc_ethercom
.ec_mii
= &sc
->sc_mii
;
592 ifmedia_init(&sc
->sc_mii
.mii_media
, 0, ether_mediachange
,
594 mii_attach(self
, &sc
->sc_mii
, 0xffffffff, MII_PHY_ANY
,
597 child
= LIST_FIRST(&sc
->sc_mii
.mii_phys
);
599 /* No PHY attached */
600 ifmedia_add(&sc
->sc_mii
.mii_media
, IFM_ETHER
| IFM_MANUAL
,
602 ifmedia_set(&sc
->sc_mii
.mii_media
, IFM_ETHER
| IFM_MANUAL
);
604 ifmedia_set(&sc
->sc_mii
.mii_media
, IFM_ETHER
| IFM_AUTO
);
605 sc
->sc_phyaddr
= child
->mii_phy
;
/* Initialize the ifnet structure and handlers. */
608 strcpy(ifp
->if_xname
, device_xname(self
));
610 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
611 ifp
->if_ioctl
= mec_ioctl
;
612 ifp
->if_start
= mec_start
;
613 ifp
->if_watchdog
= mec_watchdog
;
614 ifp
->if_init
= mec_init
;
615 ifp
->if_stop
= mec_stop
;
616 ifp
->if_mtu
= ETHERMTU
;
617 IFQ_SET_READY(&ifp
->if_snd
);
619 /* mec has dumb RX cksum support */
620 ifp
->if_capabilities
= IFCAP_CSUM_TCPv4_Rx
| IFCAP_CSUM_UDPv4_Rx
;
622 /* We can support 802.1Q VLAN-sized frames. */
623 sc
->sc_ethercom
.ec_capabilities
|= ETHERCAP_VLAN_MTU
;
625 /* attach the interface */
627 ether_ifattach(ifp
, sc
->sc_enaddr
);
629 /* establish interrupt */
630 cpu_intr_establish(maa
->maa_intr
, maa
->maa_intrmask
, mec_intr
, sc
);
633 rnd_attach_source(&sc
->sc_rnd_source
, device_xname(self
),
637 #ifdef MEC_EVENT_COUNTERS
638 evcnt_attach_dynamic(&sc
->sc_ev_txpkts
, EVCNT_TYPE_MISC
,
639 NULL
, device_xname(self
), "TX pkts queued total");
640 evcnt_attach_dynamic(&sc
->sc_ev_txdpad
, EVCNT_TYPE_MISC
,
641 NULL
, device_xname(self
), "TX pkts padded in txdesc buf");
642 evcnt_attach_dynamic(&sc
->sc_ev_txdbuf
, EVCNT_TYPE_MISC
,
643 NULL
, device_xname(self
), "TX pkts copied to txdesc buf");
644 evcnt_attach_dynamic(&sc
->sc_ev_txptr1
, EVCNT_TYPE_MISC
,
645 NULL
, device_xname(self
), "TX pkts using concat ptr1");
646 evcnt_attach_dynamic(&sc
->sc_ev_txptr1a
, EVCNT_TYPE_MISC
,
647 NULL
, device_xname(self
), "TX pkts w/ptr1 ~160bytes");
648 evcnt_attach_dynamic(&sc
->sc_ev_txptr1b
, EVCNT_TYPE_MISC
,
649 NULL
, device_xname(self
), "TX pkts w/ptr1 ~256bytes");
650 evcnt_attach_dynamic(&sc
->sc_ev_txptr1c
, EVCNT_TYPE_MISC
,
651 NULL
, device_xname(self
), "TX pkts w/ptr1 ~512bytes");
652 evcnt_attach_dynamic(&sc
->sc_ev_txptr1d
, EVCNT_TYPE_MISC
,
653 NULL
, device_xname(self
), "TX pkts w/ptr1 ~1024bytes");
654 evcnt_attach_dynamic(&sc
->sc_ev_txptr1e
, EVCNT_TYPE_MISC
,
655 NULL
, device_xname(self
), "TX pkts w/ptr1 >1024bytes");
656 evcnt_attach_dynamic(&sc
->sc_ev_txptr2
, EVCNT_TYPE_MISC
,
657 NULL
, device_xname(self
), "TX pkts using concat ptr1,2");
658 evcnt_attach_dynamic(&sc
->sc_ev_txptr2a
, EVCNT_TYPE_MISC
,
659 NULL
, device_xname(self
), "TX pkts w/ptr2 ~160bytes");
660 evcnt_attach_dynamic(&sc
->sc_ev_txptr2b
, EVCNT_TYPE_MISC
,
661 NULL
, device_xname(self
), "TX pkts w/ptr2 ~256bytes");
662 evcnt_attach_dynamic(&sc
->sc_ev_txptr2c
, EVCNT_TYPE_MISC
,
663 NULL
, device_xname(self
), "TX pkts w/ptr2 ~512bytes");
664 evcnt_attach_dynamic(&sc
->sc_ev_txptr2d
, EVCNT_TYPE_MISC
,
665 NULL
, device_xname(self
), "TX pkts w/ptr2 ~1024bytes");
666 evcnt_attach_dynamic(&sc
->sc_ev_txptr2e
, EVCNT_TYPE_MISC
,
667 NULL
, device_xname(self
), "TX pkts w/ptr2 >1024bytes");
668 evcnt_attach_dynamic(&sc
->sc_ev_txptr3
, EVCNT_TYPE_MISC
,
669 NULL
, device_xname(self
), "TX pkts using concat ptr1,2,3");
670 evcnt_attach_dynamic(&sc
->sc_ev_txptr3a
, EVCNT_TYPE_MISC
,
671 NULL
, device_xname(self
), "TX pkts w/ptr3 ~160bytes");
672 evcnt_attach_dynamic(&sc
->sc_ev_txptr3b
, EVCNT_TYPE_MISC
,
673 NULL
, device_xname(self
), "TX pkts w/ptr3 ~256bytes");
674 evcnt_attach_dynamic(&sc
->sc_ev_txptr3c
, EVCNT_TYPE_MISC
,
675 NULL
, device_xname(self
), "TX pkts w/ptr3 ~512bytes");
676 evcnt_attach_dynamic(&sc
->sc_ev_txptr3d
, EVCNT_TYPE_MISC
,
677 NULL
, device_xname(self
), "TX pkts w/ptr3 ~1024bytes");
678 evcnt_attach_dynamic(&sc
->sc_ev_txptr3e
, EVCNT_TYPE_MISC
,
679 NULL
, device_xname(self
), "TX pkts w/ptr3 >1024bytes");
680 evcnt_attach_dynamic(&sc
->sc_ev_txmbuf
, EVCNT_TYPE_MISC
,
681 NULL
, device_xname(self
), "TX pkts copied to new mbufs");
682 evcnt_attach_dynamic(&sc
->sc_ev_txmbufa
, EVCNT_TYPE_MISC
,
683 NULL
, device_xname(self
), "TX pkts w/mbuf ~160bytes");
684 evcnt_attach_dynamic(&sc
->sc_ev_txmbufb
, EVCNT_TYPE_MISC
,
685 NULL
, device_xname(self
), "TX pkts w/mbuf ~256bytes");
686 evcnt_attach_dynamic(&sc
->sc_ev_txmbufc
, EVCNT_TYPE_MISC
,
687 NULL
, device_xname(self
), "TX pkts w/mbuf ~512bytes");
688 evcnt_attach_dynamic(&sc
->sc_ev_txmbufd
, EVCNT_TYPE_MISC
,
689 NULL
, device_xname(self
), "TX pkts w/mbuf ~1024bytes");
690 evcnt_attach_dynamic(&sc
->sc_ev_txmbufe
, EVCNT_TYPE_MISC
,
691 NULL
, device_xname(self
), "TX pkts w/mbuf >1024bytes");
692 evcnt_attach_dynamic(&sc
->sc_ev_txptrs
, EVCNT_TYPE_MISC
,
693 NULL
, device_xname(self
), "TX pkts using ptrs total");
694 evcnt_attach_dynamic(&sc
->sc_ev_txptrc0
, EVCNT_TYPE_MISC
,
695 NULL
, device_xname(self
), "TX pkts w/ptrs no hdr chain");
696 evcnt_attach_dynamic(&sc
->sc_ev_txptrc1
, EVCNT_TYPE_MISC
,
697 NULL
, device_xname(self
), "TX pkts w/ptrs 1 hdr chain");
698 evcnt_attach_dynamic(&sc
->sc_ev_txptrc2
, EVCNT_TYPE_MISC
,
699 NULL
, device_xname(self
), "TX pkts w/ptrs 2 hdr chains");
700 evcnt_attach_dynamic(&sc
->sc_ev_txptrc3
, EVCNT_TYPE_MISC
,
701 NULL
, device_xname(self
), "TX pkts w/ptrs 3 hdr chains");
702 evcnt_attach_dynamic(&sc
->sc_ev_txptrc4
, EVCNT_TYPE_MISC
,
703 NULL
, device_xname(self
), "TX pkts w/ptrs 4 hdr chains");
704 evcnt_attach_dynamic(&sc
->sc_ev_txptrc5
, EVCNT_TYPE_MISC
,
705 NULL
, device_xname(self
), "TX pkts w/ptrs 5 hdr chains");
706 evcnt_attach_dynamic(&sc
->sc_ev_txptrc6
, EVCNT_TYPE_MISC
,
707 NULL
, device_xname(self
), "TX pkts w/ptrs >5 hdr chains");
708 evcnt_attach_dynamic(&sc
->sc_ev_txptrh0
, EVCNT_TYPE_MISC
,
709 NULL
, device_xname(self
), "TX pkts w/ptrs ~8bytes hdr");
710 evcnt_attach_dynamic(&sc
->sc_ev_txptrh1
, EVCNT_TYPE_MISC
,
711 NULL
, device_xname(self
), "TX pkts w/ptrs ~16bytes hdr");
712 evcnt_attach_dynamic(&sc
->sc_ev_txptrh2
, EVCNT_TYPE_MISC
,
713 NULL
, device_xname(self
), "TX pkts w/ptrs ~32bytes hdr");
714 evcnt_attach_dynamic(&sc
->sc_ev_txptrh3
, EVCNT_TYPE_MISC
,
715 NULL
, device_xname(self
), "TX pkts w/ptrs ~64bytes hdr");
716 evcnt_attach_dynamic(&sc
->sc_ev_txptrh4
, EVCNT_TYPE_MISC
,
717 NULL
, device_xname(self
), "TX pkts w/ptrs ~80bytes hdr");
718 evcnt_attach_dynamic(&sc
->sc_ev_txptrh5
, EVCNT_TYPE_MISC
,
719 NULL
, device_xname(self
), "TX pkts w/ptrs ~96bytes hdr");
720 evcnt_attach_dynamic(&sc
->sc_ev_txdstall
, EVCNT_TYPE_MISC
,
721 NULL
, device_xname(self
), "TX stalled due to no txdesc");
722 evcnt_attach_dynamic(&sc
->sc_ev_txempty
, EVCNT_TYPE_MISC
,
723 NULL
, device_xname(self
), "TX empty interrupts");
724 evcnt_attach_dynamic(&sc
->sc_ev_txsent
, EVCNT_TYPE_MISC
,
725 NULL
, device_xname(self
), "TX sent interrupts");
728 /* set shutdown hook to reset interface on powerdown */
729 if (pmf_device_register1(self
, NULL
, NULL
, mec_shutdown
))
730 pmf_class_network_register(self
, ifp
);
732 aprint_error_dev(self
, "couldn't establish power handler\n");
737 * Free any resources we've allocated during the failed attach
738 * attempt. Do this in reverse order and fall though.
741 for (i
= 0; i
< MEC_NTXDESC
; i
++) {
742 if (sc
->sc_txsoft
[i
].txs_dmamap
!= NULL
)
743 bus_dmamap_destroy(sc
->sc_dmat
,
744 sc
->sc_txsoft
[i
].txs_dmamap
);
746 bus_dmamap_unload(sc
->sc_dmat
, sc
->sc_cddmamap
);
748 bus_dmamap_destroy(sc
->sc_dmat
, sc
->sc_cddmamap
);
750 bus_dmamem_unmap(sc
->sc_dmat
, (void *)sc
->sc_control_data
,
751 sizeof(struct mec_control_data
));
753 bus_dmamem_free(sc
->sc_dmat
, &seg
, rseg
);
/*
 * Read an MII PHY register: program MEC_PHY_ADDRESS, kick
 * MEC_PHY_READ_INITIATE, then poll MEC_PHY_DATA until BUSY clears.
 * NOTE(review): the timeout/error return path after the poll loop is
 * missing from this extraction.
 */
759 mec_mii_readreg(device_t self
, int phy
, int reg
)
761 struct mec_softc
*sc
= device_private(self
);
762 bus_space_tag_t st
= sc
->sc_st
;
763 bus_space_handle_t sh
= sc
->sc_sh
;
/* Bail out if a previous MII operation is still in flight. */
767 if (mec_mii_wait(sc
) != 0)
770 bus_space_write_8(st
, sh
, MEC_PHY_ADDRESS
,
771 (phy
<< MEC_PHY_ADDR_DEVSHIFT
) | (reg
& MEC_PHY_ADDR_REGISTER
));
773 bus_space_write_8(st
, sh
, MEC_PHY_READ_INITIATE
, 1);
/* Poll up to 20 times for the read to complete. */
777 for (i
= 0; i
< 20; i
++) {
780 val
= bus_space_read_8(st
, sh
, MEC_PHY_DATA
);
782 if ((val
& MEC_PHY_DATA_BUSY
) == 0)
783 return val
& MEC_PHY_DATA_VALUE
;
/*
 * Write an MII PHY register: wait for the interface to go idle,
 * program MEC_PHY_ADDRESS, then write the value to MEC_PHY_DATA.
 */
789 mec_mii_writereg(device_t self
, int phy
, int reg
, int val
)
791 struct mec_softc
*sc
= device_private(self
);
792 bus_space_tag_t st
= sc
->sc_st
;
793 bus_space_handle_t sh
= sc
->sc_sh
;
/* If the MII never goes idle, report and (presumably) give up. */
795 if (mec_mii_wait(sc
) != 0) {
796 printf("timed out writing %x: %x\n", reg
, val
);
800 bus_space_write_8(st
, sh
, MEC_PHY_ADDRESS
,
801 (phy
<< MEC_PHY_ADDR_DEVSHIFT
) | (reg
& MEC_PHY_ADDR_REGISTER
));
805 bus_space_write_8(st
, sh
, MEC_PHY_DATA
, val
& MEC_PHY_DATA_VALUE
);
/*
 * Poll MEC_PHY_DATA until the BUSY bit clears (up to 100 iterations).
 * NOTE(review): the success/early-exit returns, delays inside the loop
 * and the final error return are missing from this extraction; only the
 * loop skeleton and the timeout message are visible.
 */
813 mec_mii_wait(struct mec_softc
*sc
)
818 for (i
= 0; i
< 100; i
++) {
822 busy
= bus_space_read_8(sc
->sc_st
, sc
->sc_sh
, MEC_PHY_DATA
);
825 if ((busy
& MEC_PHY_DATA_BUSY
) == 0)
828 if (busy
== 0xffff) /* XXX ? */
833 printf("%s: MII timed out\n", device_xname(sc
->sc_dev
));
/*
 * MII status-change callback: rewrite the duplex and inter-packet-gap
 * bits in MEC_MAC_CONTROL to match the currently negotiated media.
 */
838 mec_statchg(device_t self
)
840 struct mec_softc
*sc
= device_private(self
);
841 bus_space_tag_t st
= sc
->sc_st
;
842 bus_space_handle_t sh
= sc
->sc_sh
;
/* Read-modify-write: clear all IPG/duplex/speed bits first. */
845 control
= bus_space_read_8(st
, sh
, MEC_MAC_CONTROL
);
846 control
&= ~(MEC_MAC_IPGT
| MEC_MAC_IPGR1
| MEC_MAC_IPGR2
|
847 MEC_MAC_FULL_DUPLEX
| MEC_MAC_SPEED_SELECT
);
849 /* must also set IPG here for duplex stuff ... */
850 if ((sc
->sc_mii
.mii_media_active
& IFM_FDX
) != 0) {
851 control
|= MEC_MAC_FULL_DUPLEX
;
854 control
|= MEC_MAC_IPG_DEFAULT
;
857 bus_space_write_8(st
, sh
, MEC_MAC_CONTROL
, control
);
862 * maybe this function should be moved to common part
863 * (sgimips/machdep.c or elsewhere) for all on-board network devices.
/*
 * Convert an ASCII "xx:xx:xx:xx:xx:xx" MAC address string into bytes.
 * Each byte is built from two hex digits: decimal digits map via
 * (c - '0'), other hex digits via (toupper(c) + 10 - 'A').
 * NOTE(review): the loop body's character-fetch and separator handling
 * lines are missing from this extraction.
 */
866 enaddr_aton(const char *str
, uint8_t *eaddr
)
871 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++) {
877 eaddr
[i
] = (c
- '0');
878 } else if (isxdigit(c
)) {
879 eaddr
[i
] = (toupper(c
) + 10 - 'A');
883 eaddr
[i
] = (eaddr
[i
] << 4) | (c
- '0');
884 } else if (isxdigit(c
)) {
885 eaddr
[i
] = (eaddr
[i
] << 4) | (toupper(c
) + 10 - 'A');
/*
 * ifnet init handler: program the TX ring base, reset software TX
 * state, push all RX descriptors into the hardware FIFO, enable DMA
 * (RX interrupts only at this point), start the tick callout, and mark
 * the interface running.
 */
891 mec_init(struct ifnet
*ifp
)
893 struct mec_softc
*sc
= ifp
->if_softc
;
894 bus_space_tag_t st
= sc
->sc_st
;
895 bus_space_handle_t sh
= sc
->sc_sh
;
896 struct mec_rxdesc
*rxd
;
899 /* cancel any pending I/O */
905 /* setup filter for multicast or promisc mode */
908 /* set the TX ring pointer to the base address */
909 bus_space_write_8(st
, sh
, MEC_TX_RING_BASE
, MEC_CDTXADDR(sc
, 0));
911 sc
->sc_txpending
= 0;
913 sc
->sc_txlast
= MEC_NTXDESC
- 1;
915 /* put RX buffers into FIFO */
916 for (i
= 0; i
< MEC_NRXDESC
; i
++) {
917 rxd
= &sc
->sc_rxdesc
[i
];
919 MEC_RXSTATSYNC(sc
, i
, BUS_DMASYNC_PREREAD
);
920 MEC_RXBUFSYNC(sc
, i
, ETHER_MAX_LEN
, BUS_DMASYNC_PREREAD
);
921 bus_space_write_8(st
, sh
, MEC_MCL_RX_FIFO
, MEC_CDRXADDR(sc
, i
));
925 #if 0 /* XXX no info */
926 bus_space_write_8(st
, sh
, MEC_TIMER
, 0);
930 * MEC_DMA_TX_INT_ENABLE will be set later otherwise it causes
931 * spurious interrupts when TX buffers are empty
933 bus_space_write_8(st
, sh
, MEC_DMA_CONTROL
,
934 (MEC_RXD_DMAOFFSET
<< MEC_DMA_RX_DMA_OFFSET_SHIFT
) |
935 (MEC_NRXDESC
<< MEC_DMA_RX_INT_THRESH_SHIFT
) |
936 MEC_DMA_TX_DMA_ENABLE
| /* MEC_DMA_TX_INT_ENABLE | */
937 MEC_DMA_RX_DMA_ENABLE
| MEC_DMA_RX_INT_ENABLE
);
/* Periodic tick (once per second) for MII/watchdog housekeeping. */
939 callout_reset(&sc
->sc_tick_ch
, hz
, mec_tick
, sc
);
941 if ((rc
= ether_mediachange(ifp
)) != 0)
944 ifp
->if_flags
|= IFF_RUNNING
;
945 ifp
->if_flags
&= ~IFF_OACTIVE
;
952 mec_reset(struct mec_softc
*sc
)
954 bus_space_tag_t st
= sc
->sc_st
;
955 bus_space_handle_t sh
= sc
->sc_sh
;
959 bus_space_write_8(st
, sh
, MEC_DMA_CONTROL
, 0);
962 bus_space_write_8(st
, sh
, MEC_MAC_CONTROL
, MEC_MAC_CORE_RESET
);
964 bus_space_write_8(st
, sh
, MEC_MAC_CONTROL
, 0);
967 /* Default to 100/half and let auto-negotiation work its magic */
968 control
= MEC_MAC_SPEED_SELECT
| MEC_MAC_FILTER_MATCHMULTI
|
971 bus_space_write_8(st
, sh
, MEC_MAC_CONTROL
, control
);
972 /* stop DMA again for sanity */
973 bus_space_write_8(st
, sh
, MEC_DMA_CONTROL
, 0);
975 DPRINTF(MEC_DEBUG_RESET
, ("mec: control now %llx\n",
976 bus_space_read_8(st
, sh
, MEC_MAC_CONTROL
)));
980 mec_start(struct ifnet
*ifp
)
982 struct mec_softc
*sc
= ifp
->if_softc
;
984 struct mec_txdesc
*txd
;
985 struct mec_txsoft
*txs
;
987 bus_space_tag_t st
= sc
->sc_st
;
988 bus_space_handle_t sh
= sc
->sc_sh
;
989 int error
, firsttx
, nexttx
, opending
;
990 int len
, bufoff
, buflen
, nsegs
, align
, resid
, pseg
, nptr
, slen
, i
;
993 if ((ifp
->if_flags
& (IFF_RUNNING
|IFF_OACTIVE
)) != IFF_RUNNING
)
997 * Remember the previous txpending and the first transmit descriptor.
999 opending
= sc
->sc_txpending
;
1000 firsttx
= MEC_NEXTTX(sc
->sc_txlast
);
1002 DPRINTF(MEC_DEBUG_START
,
1003 ("%s: opending = %d, firsttx = %d\n", __func__
, opending
, firsttx
));
1005 while (sc
->sc_txpending
< MEC_NTXDESC
- 1) {
1006 /* Grab a packet off the queue. */
1007 IFQ_POLL(&ifp
->if_snd
, m0
);
1013 * Get the next available transmit descriptor.
1015 nexttx
= MEC_NEXTTX(sc
->sc_txlast
);
1016 txd
= &sc
->sc_txdesc
[nexttx
];
1017 txs
= &sc
->sc_txsoft
[nexttx
];
1018 dmamap
= txs
->txs_dmamap
;
1024 nptr
= 0; /* XXX gcc */
1025 pseg
= 0; /* XXX gcc */
1027 len
= m0
->m_pkthdr
.len
;
1029 DPRINTF(MEC_DEBUG_START
,
1030 ("%s: len = %d, nexttx = %d, txpending = %d\n",
1031 __func__
, len
, nexttx
, sc
->sc_txpending
));
1033 if (len
<= MEC_TXD_BUFSIZE
) {
1035 * If a TX packet will fit into small txdesc buffer,
1036 * just copy it into there. Maybe it's faster than
1037 * checking alignment and calling bus_dma(9) etc.
1039 DPRINTF(MEC_DEBUG_START
, ("%s: short packet\n",
1041 IFQ_DEQUEUE(&ifp
->if_snd
, m0
);
1044 * I don't know if MEC chip does auto padding,
1045 * but do it manually for safety.
1047 if (len
< ETHER_PAD_LEN
) {
1048 MEC_EVCNT_INCR(&sc
->sc_ev_txdpad
);
1049 bufoff
= MEC_TXD_BUFSTART(ETHER_PAD_LEN
);
1050 m_copydata(m0
, 0, len
, txd
->txd_buf
+ bufoff
);
1051 memset(txd
->txd_buf
+ bufoff
+ len
, 0,
1052 ETHER_PAD_LEN
- len
);
1053 len
= buflen
= ETHER_PAD_LEN
;
1055 MEC_EVCNT_INCR(&sc
->sc_ev_txdbuf
);
1056 bufoff
= MEC_TXD_BUFSTART(len
);
1057 m_copydata(m0
, 0, len
, txd
->txd_buf
+ bufoff
);
1062 * If the packet won't fit the static buffer in txdesc,
1063 * we have to use the concatenate pointers to handle it.
1065 DPRINTF(MEC_DEBUG_START
, ("%s: long packet\n",
1067 txs
->txs_flags
= MEC_TXS_TXDPTR
;
1070 * Call bus_dmamap_load_mbuf(9) first to see
1071 * how many chains the TX mbuf has.
1073 error
= bus_dmamap_load_mbuf(sc
->sc_dmat
, dmamap
, m0
,
1074 BUS_DMA_WRITE
| BUS_DMA_NOWAIT
);
1077 * Check chains which might contain headers.
1078 * They might be so much fragmented and
1079 * it's better to copy them into txdesc buffer
1080 * since they would be small enough.
1082 nsegs
= dmamap
->dm_nsegs
;
1083 for (pseg
= 0; pseg
< nsegs
; pseg
++) {
1084 slen
= dmamap
->dm_segs
[pseg
].ds_len
;
1086 MEC_TXD_BUFSIZE1
- MEC_TXD_ALIGN
)
1091 * Check if the rest chains can be fit into
 1092 * the concatenate pointers.
1094 align
= dmamap
->dm_segs
[pseg
].ds_addr
&
1098 * If the first chain isn't uint64_t
1099 * aligned, append the unaligned part
1100 * into txdesc buffer too.
1102 resid
= MEC_TXD_ALIGN
- align
;
1104 for (; pseg
< nsegs
; pseg
++) {
1106 dmamap
->dm_segs
[pseg
].ds_len
;
1111 } else if (pseg
== 0) {
1113 * In this case, the first chain is
1114 * uint64_t aligned but it's too long
1115 * to put into txdesc buf.
1116 * We have to put some data into
1117 * txdesc buf even in this case,
1118 * so put MEC_TXD_ALIGN bytes there.
1120 buflen
= resid
= MEC_TXD_ALIGN
;
1122 nptr
= nsegs
- pseg
;
1123 if (nptr
<= MEC_NTXPTR
) {
1124 bufoff
= MEC_TXD_BUFSTART(buflen
);
1127 * Check if all the rest chains are
1131 for (i
= pseg
+ 1; i
< nsegs
; i
++)
1133 dmamap
->dm_segs
[i
].ds_addr
1134 & MEC_TXD_ALIGNMASK
;
1136 /* chains are not aligned */
1140 /* The TX mbuf chains doesn't fit. */
1144 bus_dmamap_unload(sc
->sc_dmat
, dmamap
);
1148 * The TX mbuf chains can't be put into
 1149 * the concatenate buffers. In this case,
1150 * we have to allocate a new contiguous mbuf
1151 * and copy data into it.
1153 * Even in this case, the Ethernet header in
1154 * the TX mbuf might be unaligned and trailing
1155 * data might be word aligned, so put 2 byte
1156 * (MEC_ETHER_ALIGN) padding at the top of the
1157 * allocated mbuf and copy TX packets.
1158 * 6 bytes (MEC_ALIGN_BYTES - MEC_ETHER_ALIGN)
1159 * at the top of the new mbuf won't be uint64_t
 1160 * aligned, but we have to put some data into
1161 * txdesc buffer anyway even if the buffer
1162 * is uint64_t aligned.
1164 DPRINTF(MEC_DEBUG_START
|MEC_DEBUG_TXSEGS
,
1165 ("%s: re-allocating mbuf\n", __func__
));
1167 MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
1169 printf("%s: unable to allocate "
1171 device_xname(sc
->sc_dev
));
1174 if (len
> (MHLEN
- MEC_ETHER_ALIGN
)) {
1175 MCLGET(m
, M_DONTWAIT
);
1176 if ((m
->m_flags
& M_EXT
) == 0) {
1177 printf("%s: unable to allocate "
1179 device_xname(sc
->sc_dev
));
1184 m
->m_data
+= MEC_ETHER_ALIGN
;
1187 * Copy whole data (including unaligned part)
1188 * for following bpf_mtap().
1190 m_copydata(m0
, 0, len
, mtod(m
, void *));
1191 m
->m_pkthdr
.len
= m
->m_len
= len
;
1192 error
= bus_dmamap_load_mbuf(sc
->sc_dmat
,
1193 dmamap
, m
, BUS_DMA_WRITE
| BUS_DMA_NOWAIT
);
1194 if (dmamap
->dm_nsegs
> 1) {
1195 /* should not happen, but for sanity */
1196 bus_dmamap_unload(sc
->sc_dmat
, dmamap
);
1200 printf("%s: unable to load TX buffer, "
1202 device_xname(sc
->sc_dev
), error
);
1207 * Only the first segment should be put into
 1208 * the concatenate pointer in this case.
 1214 * Set length of unaligned part which will be
1215 * copied into txdesc buffer.
1217 buflen
= MEC_TXD_ALIGN
- MEC_ETHER_ALIGN
;
1218 bufoff
= MEC_TXD_BUFSTART(buflen
);
1220 #ifdef MEC_EVENT_COUNTERS
1221 MEC_EVCNT_INCR(&sc
->sc_ev_txmbuf
);
1223 MEC_EVCNT_INCR(&sc
->sc_ev_txmbufa
);
1224 else if (len
<= 256)
1225 MEC_EVCNT_INCR(&sc
->sc_ev_txmbufb
);
1226 else if (len
<= 512)
1227 MEC_EVCNT_INCR(&sc
->sc_ev_txmbufc
);
1228 else if (len
<= 1024)
1229 MEC_EVCNT_INCR(&sc
->sc_ev_txmbufd
);
1231 MEC_EVCNT_INCR(&sc
->sc_ev_txmbufe
);
1234 #ifdef MEC_EVENT_COUNTERS
1236 MEC_EVCNT_INCR(&sc
->sc_ev_txptrs
);
1238 MEC_EVCNT_INCR(&sc
->sc_ev_txptr1
);
1241 &sc
->sc_ev_txptr1a
);
1242 else if (len
<= 256)
1244 &sc
->sc_ev_txptr1b
);
1245 else if (len
<= 512)
1247 &sc
->sc_ev_txptr1c
);
1248 else if (len
<= 1024)
1250 &sc
->sc_ev_txptr1d
);
1253 &sc
->sc_ev_txptr1e
);
1254 } else if (nptr
== 2) {
1255 MEC_EVCNT_INCR(&sc
->sc_ev_txptr2
);
1258 &sc
->sc_ev_txptr2a
);
1259 else if (len
<= 256)
1261 &sc
->sc_ev_txptr2b
);
1262 else if (len
<= 512)
1264 &sc
->sc_ev_txptr2c
);
1265 else if (len
<= 1024)
1267 &sc
->sc_ev_txptr2d
);
1270 &sc
->sc_ev_txptr2e
);
1271 } else if (nptr
== 3) {
1272 MEC_EVCNT_INCR(&sc
->sc_ev_txptr3
);
1275 &sc
->sc_ev_txptr3a
);
1276 else if (len
<= 256)
1278 &sc
->sc_ev_txptr3b
);
1279 else if (len
<= 512)
1281 &sc
->sc_ev_txptr3c
);
1282 else if (len
<= 1024)
1284 &sc
->sc_ev_txptr3d
);
1287 &sc
->sc_ev_txptr3e
);
1290 MEC_EVCNT_INCR(&sc
->sc_ev_txptrc0
);
1292 MEC_EVCNT_INCR(&sc
->sc_ev_txptrc1
);
1294 MEC_EVCNT_INCR(&sc
->sc_ev_txptrc2
);
1296 MEC_EVCNT_INCR(&sc
->sc_ev_txptrc3
);
1298 MEC_EVCNT_INCR(&sc
->sc_ev_txptrc4
);
1300 MEC_EVCNT_INCR(&sc
->sc_ev_txptrc5
);
1302 MEC_EVCNT_INCR(&sc
->sc_ev_txptrc6
);
1304 MEC_EVCNT_INCR(&sc
->sc_ev_txptrh0
);
1305 else if (buflen
<= 16)
1306 MEC_EVCNT_INCR(&sc
->sc_ev_txptrh1
);
1307 else if (buflen
<= 32)
1308 MEC_EVCNT_INCR(&sc
->sc_ev_txptrh2
);
1309 else if (buflen
<= 64)
1310 MEC_EVCNT_INCR(&sc
->sc_ev_txptrh3
);
1311 else if (buflen
<= 80)
1312 MEC_EVCNT_INCR(&sc
->sc_ev_txptrh4
);
1314 MEC_EVCNT_INCR(&sc
->sc_ev_txptrh5
);
1317 m_copydata(m0
, 0, buflen
, txd
->txd_buf
+ bufoff
);
1319 IFQ_DEQUEUE(&ifp
->if_snd
, m0
);
1326 * sync the DMA map for TX mbuf
1328 bus_dmamap_sync(sc
->sc_dmat
, dmamap
, buflen
,
1329 len
- buflen
, BUS_DMASYNC_PREWRITE
);
1334 * Pass packet to bpf if there is a listener.
1337 bpf_mtap(ifp
->if_bpf
, m0
);
1339 MEC_EVCNT_INCR(&sc
->sc_ev_txpkts
);
1342 * setup the transmit descriptor.
1344 txdcmd
= TXCMD_BUFSTART(MEC_TXDESCSIZE
- buflen
) | (len
- 1);
1347 * Set MEC_TXCMD_TXINT every MEC_NTXDESC_INTR packets
1348 * if more than half txdescs have been queued
1349 * because TX_EMPTY interrupts will rarely happen
1350 * if TX queue is so stacked.
1352 if (sc
->sc_txpending
> (MEC_NTXDESC
/ 2) &&
1353 (nexttx
& (MEC_NTXDESC_INTR
- 1)) == 0)
1354 txdcmd
|= MEC_TXCMD_TXINT
;
1356 if ((txs
->txs_flags
& MEC_TXS_TXDPTR
) != 0) {
1357 bus_dma_segment_t
*segs
= dmamap
->dm_segs
;
1359 DPRINTF(MEC_DEBUG_TXSEGS
,
1360 ("%s: nsegs = %d, pseg = %d, nptr = %d\n",
1361 __func__
, dmamap
->dm_nsegs
, pseg
, nptr
));
1365 KASSERT((segs
[pseg
+ 2].ds_addr
&
1366 MEC_TXD_ALIGNMASK
) == 0);
1367 txdcmd
|= MEC_TXCMD_PTR3
;
1369 TXPTR_LEN(segs
[pseg
+ 2].ds_len
- 1) |
1370 segs
[pseg
+ 2].ds_addr
;
1373 KASSERT((segs
[pseg
+ 1].ds_addr
&
1374 MEC_TXD_ALIGNMASK
) == 0);
1375 txdcmd
|= MEC_TXCMD_PTR2
;
1377 TXPTR_LEN(segs
[pseg
+ 1].ds_len
- 1) |
1378 segs
[pseg
+ 1].ds_addr
;
1381 txdcmd
|= MEC_TXCMD_PTR1
;
1383 TXPTR_LEN(segs
[pseg
].ds_len
- resid
- 1) |
1384 (segs
[pseg
].ds_addr
+ resid
);
1387 panic("%s: impossible nptr in %s",
1388 device_xname(sc
->sc_dev
), __func__
);
1392 * Store a pointer to the packet so we can
1398 * In this case all data are copied to buffer in txdesc,
1399 * we can free TX mbuf here.
1403 txd
->txd_cmd
= txdcmd
;
1405 DPRINTF(MEC_DEBUG_START
,
1406 ("%s: txd_cmd = 0x%016llx\n",
1407 __func__
, txd
->txd_cmd
));
1408 DPRINTF(MEC_DEBUG_START
,
1409 ("%s: txd_ptr[0] = 0x%016llx\n",
1410 __func__
, txd
->txd_ptr
[0]));
1411 DPRINTF(MEC_DEBUG_START
,
1412 ("%s: txd_ptr[1] = 0x%016llx\n",
1413 __func__
, txd
->txd_ptr
[1]));
1414 DPRINTF(MEC_DEBUG_START
,
1415 ("%s: txd_ptr[2] = 0x%016llx\n",
1416 __func__
, txd
->txd_ptr
[2]));
1417 DPRINTF(MEC_DEBUG_START
,
1418 ("%s: len = %d (0x%04x), buflen = %d (0x%02x)\n",
1419 __func__
, len
, len
, buflen
, buflen
));
1421 /* sync TX descriptor */
1422 MEC_TXDESCSYNC(sc
, nexttx
,
1423 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
1426 bus_space_write_8(st
, sh
, MEC_TX_RING_PTR
, MEC_NEXTTX(nexttx
));
1428 /* advance the TX pointer. */
1430 sc
->sc_txlast
= nexttx
;
1433 if (sc
->sc_txpending
== MEC_NTXDESC
- 1) {
1434 /* No more slots; notify upper layer. */
1435 MEC_EVCNT_INCR(&sc
->sc_ev_txdstall
);
1436 ifp
->if_flags
|= IFF_OACTIVE
;
1439 if (sc
->sc_txpending
!= opending
) {
1441 * If the transmitter was idle,
1442 * reset the txdirty pointer and re-enable TX interrupt.
1444 if (opending
== 0) {
1445 sc
->sc_txdirty
= firsttx
;
1446 bus_space_write_8(st
, sh
, MEC_TX_ALIAS
,
1447 MEC_TX_ALIAS_INT_ENABLE
);
1450 /* Set a watchdog timer in case the chip flakes out. */
1456 mec_stop(struct ifnet
*ifp
, int disable
)
1458 struct mec_softc
*sc
= ifp
->if_softc
;
1459 struct mec_txsoft
*txs
;
1462 DPRINTF(MEC_DEBUG_STOP
, ("%s\n", __func__
));
1465 ifp
->if_flags
&= ~(IFF_RUNNING
| IFF_OACTIVE
);
1467 callout_stop(&sc
->sc_tick_ch
);
1468 mii_down(&sc
->sc_mii
);
1470 /* release any TX buffers */
1471 for (i
= 0; i
< MEC_NTXDESC
; i
++) {
1472 txs
= &sc
->sc_txsoft
[i
];
1473 if ((txs
->txs_flags
& MEC_TXS_TXDPTR
) != 0) {
1474 bus_dmamap_unload(sc
->sc_dmat
, txs
->txs_dmamap
);
1475 m_freem(txs
->txs_mbuf
);
1476 txs
->txs_mbuf
= NULL
;
1482 mec_ioctl(struct ifnet
*ifp
, u_long cmd
, void *data
)
1488 error
= ether_ioctl(ifp
, cmd
, data
);
1489 if (error
== ENETRESET
) {
1491 * Multicast list has changed; set the hardware filter
1494 if (ifp
->if_flags
& IFF_RUNNING
)
1495 error
= mec_init(ifp
);
1500 /* Try to get more packets going. */
1508 mec_watchdog(struct ifnet
*ifp
)
1510 struct mec_softc
*sc
= ifp
->if_softc
;
1512 printf("%s: device timeout\n", device_xname(sc
->sc_dev
));
1521 struct mec_softc
*sc
= arg
;
1525 mii_tick(&sc
->sc_mii
);
1528 callout_reset(&sc
->sc_tick_ch
, hz
, mec_tick
, sc
);
1532 mec_setfilter(struct mec_softc
*sc
)
1534 struct ethercom
*ec
= &sc
->sc_ethercom
;
1535 struct ifnet
*ifp
= &sc
->sc_ethercom
.ec_if
;
1536 struct ether_multi
*enm
;
1537 struct ether_multistep step
;
1538 bus_space_tag_t st
= sc
->sc_st
;
1539 bus_space_handle_t sh
= sc
->sc_sh
;
1541 uint32_t control
, hash
;
1544 control
= bus_space_read_8(st
, sh
, MEC_MAC_CONTROL
);
1545 control
&= ~MEC_MAC_FILTER_MASK
;
1547 if (ifp
->if_flags
& IFF_PROMISC
) {
1548 control
|= MEC_MAC_FILTER_PROMISC
;
1549 bus_space_write_8(st
, sh
, MEC_MULTICAST
, 0xffffffffffffffffULL
);
1550 bus_space_write_8(st
, sh
, MEC_MAC_CONTROL
, control
);
1556 ETHER_FIRST_MULTI(step
, ec
, enm
);
1557 while (enm
!= NULL
) {
1558 if (memcmp(enm
->enm_addrlo
, enm
->enm_addrhi
, ETHER_ADDR_LEN
)) {
1559 /* set allmulti for a range of multicast addresses */
1560 control
|= MEC_MAC_FILTER_ALLMULTI
;
1561 bus_space_write_8(st
, sh
, MEC_MULTICAST
,
1562 0xffffffffffffffffULL
);
1563 bus_space_write_8(st
, sh
, MEC_MAC_CONTROL
, control
);
1567 #define mec_calchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
1569 hash
= mec_calchash(enm
->enm_addrlo
);
1570 mchash
|= 1 << hash
;
1572 ETHER_NEXT_MULTI(step
, enm
);
1575 ifp
->if_flags
&= ~IFF_ALLMULTI
;
1578 control
|= MEC_MAC_FILTER_MATCHMULTI
;
1580 bus_space_write_8(st
, sh
, MEC_MULTICAST
, mchash
);
1581 bus_space_write_8(st
, sh
, MEC_MAC_CONTROL
, control
);
1587 struct mec_softc
*sc
= arg
;
1588 bus_space_tag_t st
= sc
->sc_st
;
1589 bus_space_handle_t sh
= sc
->sc_sh
;
1590 struct ifnet
*ifp
= &sc
->sc_ethercom
.ec_if
;
1591 uint32_t statreg
, statack
, txptr
;
1594 DPRINTF(MEC_DEBUG_INTR
, ("%s: called\n", __func__
));
1599 statreg
= bus_space_read_8(st
, sh
, MEC_INT_STATUS
);
1601 DPRINTF(MEC_DEBUG_INTR
,
1602 ("%s: INT_STAT = 0x%08x\n", __func__
, statreg
));
1604 statack
= statreg
& MEC_INT_STATUS_MASK
;
1607 bus_space_write_8(st
, sh
, MEC_INT_STATUS
, statack
);
1612 (MEC_INT_RX_THRESHOLD
|
1613 MEC_INT_RX_FIFO_UNDERFLOW
)) {
1619 MEC_INT_TX_PACKET_SENT
|
1620 MEC_INT_TX_ABORT
)) {
1621 txptr
= (statreg
& MEC_INT_TX_RING_BUFFER_ALIAS
)
1622 >> MEC_INT_TX_RING_BUFFER_SHIFT
;
1623 mec_txintr(sc
, txptr
);
1625 if ((statack
& MEC_INT_TX_EMPTY
) != 0) {
1627 * disable TX interrupt to stop
1628 * TX empty interrupt
1630 bus_space_write_8(st
, sh
, MEC_TX_ALIAS
, 0);
1631 DPRINTF(MEC_DEBUG_INTR
,
1632 ("%s: disable TX_INT\n", __func__
));
1634 #ifdef MEC_EVENT_COUNTERS
1635 if ((statack
& MEC_INT_TX_EMPTY
) != 0)
1636 MEC_EVCNT_INCR(&sc
->sc_ev_txempty
);
1637 if ((statack
& MEC_INT_TX_PACKET_SENT
) != 0)
1638 MEC_EVCNT_INCR(&sc
->sc_ev_txsent
);
1643 (MEC_INT_TX_LINK_FAIL
|
1644 MEC_INT_TX_MEM_ERROR
|
1646 MEC_INT_RX_FIFO_UNDERFLOW
|
1647 MEC_INT_RX_DMA_UNDERFLOW
)) {
1648 printf("%s: %s: interrupt status = 0x%08x\n",
1649 device_xname(sc
->sc_dev
), __func__
, statreg
);
1655 if (sent
&& !IFQ_IS_EMPTY(&ifp
->if_snd
)) {
1656 /* try to get more packets going */
1662 rnd_add_uint32(&sc
->sc_rnd_source
, statreg
);
1669 mec_rxintr(struct mec_softc
*sc
)
1671 bus_space_tag_t st
= sc
->sc_st
;
1672 bus_space_handle_t sh
= sc
->sc_sh
;
1673 struct ifnet
*ifp
= &sc
->sc_ethercom
.ec_if
;
1675 struct mec_rxdesc
*rxd
;
1681 DPRINTF(MEC_DEBUG_RXINTR
, ("%s: called\n", __func__
));
1683 for (i
= sc
->sc_rxptr
;; i
= MEC_NEXTRX(i
)) {
1684 rxd
= &sc
->sc_rxdesc
[i
];
1686 MEC_RXSTATSYNC(sc
, i
, BUS_DMASYNC_POSTREAD
);
1687 rxstat
= rxd
->rxd_stat
;
1689 DPRINTF(MEC_DEBUG_RXINTR
,
1690 ("%s: rxstat = 0x%016llx, rxptr = %d\n",
1691 __func__
, rxstat
, i
));
1692 DPRINTF(MEC_DEBUG_RXINTR
, ("%s: rxfifo = 0x%08x\n",
1693 __func__
, (u_int
)bus_space_read_8(st
, sh
, MEC_RX_FIFO
)));
1695 if ((rxstat
& MEC_RXSTAT_RECEIVED
) == 0) {
1696 MEC_RXSTATSYNC(sc
, i
, BUS_DMASYNC_PREREAD
);
1700 len
= rxstat
& MEC_RXSTAT_LEN
;
1702 if (len
< ETHER_MIN_LEN
||
1703 len
> (MCLBYTES
- MEC_ETHER_ALIGN
)) {
1704 /* invalid length packet; drop it. */
1705 DPRINTF(MEC_DEBUG_RXINTR
,
1706 ("%s: wrong packet\n", __func__
));
1710 MEC_RXSTATSYNC(sc
, i
, BUS_DMASYNC_PREREAD
);
1711 bus_space_write_8(st
, sh
, MEC_MCL_RX_FIFO
,
1712 MEC_CDRXADDR(sc
, i
));
1717 * If 802.1Q VLAN MTU is enabled, ignore the bad packet error.
1719 if ((sc
->sc_ethercom
.ec_capenable
& ETHERCAP_VLAN_MTU
) != 0)
1720 rxstat
&= ~MEC_RXSTAT_BADPACKET
;
1723 (MEC_RXSTAT_BADPACKET
|
1724 MEC_RXSTAT_LONGEVENT
|
1725 MEC_RXSTAT_INVALID
|
1726 MEC_RXSTAT_CRCERROR
|
1727 MEC_RXSTAT_VIOLATION
)) {
1728 printf("%s: mec_rxintr: status = 0x%016"PRIx64
"\n",
1729 device_xname(sc
->sc_dev
), rxstat
);
1734 * The MEC includes the CRC with every packet. Trim
1737 len
-= ETHER_CRC_LEN
;
1740 * now allocate an mbuf (and possibly a cluster) to hold
1741 * the received packet.
1743 MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
1745 printf("%s: unable to allocate RX mbuf\n",
1746 device_xname(sc
->sc_dev
));
1749 if (len
> (MHLEN
- MEC_ETHER_ALIGN
)) {
1750 MCLGET(m
, M_DONTWAIT
);
1751 if ((m
->m_flags
& M_EXT
) == 0) {
1752 printf("%s: unable to allocate RX cluster\n",
1753 device_xname(sc
->sc_dev
));
1761 * Note MEC chip seems to insert 2 byte padding at the top of
1762 * RX buffer, but we copy whole buffer to avoid unaligned copy.
1764 MEC_RXBUFSYNC(sc
, i
, len
+ ETHER_CRC_LEN
, BUS_DMASYNC_POSTREAD
);
1765 memcpy(mtod(m
, void *), rxd
->rxd_buf
, MEC_ETHER_ALIGN
+ len
);
1766 crc
= be32dec(rxd
->rxd_buf
+ MEC_ETHER_ALIGN
+ len
);
1767 MEC_RXBUFSYNC(sc
, i
, ETHER_MAX_LEN
, BUS_DMASYNC_PREREAD
);
1768 m
->m_data
+= MEC_ETHER_ALIGN
;
1770 /* put RX buffer into FIFO again */
1772 MEC_RXSTATSYNC(sc
, i
, BUS_DMASYNC_PREREAD
);
1773 bus_space_write_8(st
, sh
, MEC_MCL_RX_FIFO
, MEC_CDRXADDR(sc
, i
));
1775 m
->m_pkthdr
.rcvif
= ifp
;
1776 m
->m_pkthdr
.len
= m
->m_len
= len
;
1777 if ((ifp
->if_csum_flags_rx
& (M_CSUM_TCPv4
|M_CSUM_UDPv4
)) != 0)
1778 mec_rxcsum(sc
, m
, RXSTAT_CKSUM(rxstat
), crc
);
1784 * Pass this up to any BPF listeners, but only
1785 * pass it up the stack if it's for us.
1788 bpf_mtap(ifp
->if_bpf
, m
);
1792 (*ifp
->if_input
)(ifp
, m
);
1795 /* update RX pointer */
1800 mec_rxcsum(struct mec_softc
*sc
, struct mbuf
*m
, uint16_t rxcsum
, uint32_t crc
)
1802 struct ifnet
*ifp
= &sc
->sc_ethercom
.ec_if
;
1803 struct ether_header
*eh
;
1806 u_int len
, pktlen
, hlen
;
1807 uint32_t csum_data
, dsum
;
1815 if (len
< ETHER_HDR_LEN
+ sizeof(struct ip
))
1817 pktlen
= len
- ETHER_HDR_LEN
;
1818 eh
= mtod(m
, struct ether_header
*);
1819 if (ntohs(eh
->ether_type
) != ETHERTYPE_IP
)
1821 ip
= (struct ip
*)((uint8_t *)eh
+ ETHER_HDR_LEN
);
1822 if (ip
->ip_v
!= IPVERSION
)
1825 hlen
= ip
->ip_hl
<< 2;
1826 if (hlen
< sizeof(struct ip
))
1830 * Bail if too short, has random trailing garbage, truncated,
1831 * fragment, or has ethernet pad.
1833 if (ntohs(ip
->ip_len
) < hlen
||
1834 ntohs(ip
->ip_len
) != pktlen
||
1835 (ntohs(ip
->ip_off
) & (IP_MF
| IP_OFFMASK
)) != 0)
1840 if ((ifp
->if_csum_flags_rx
& M_CSUM_TCPv4
) == 0 ||
1841 pktlen
< (hlen
+ sizeof(struct tcphdr
)))
1843 csum_flags
= M_CSUM_TCPv4
| M_CSUM_DATA
| M_CSUM_NO_PSEUDOHDR
;
1846 if ((ifp
->if_csum_flags_rx
& M_CSUM_UDPv4
) == 0 ||
1847 pktlen
< (hlen
+ sizeof(struct udphdr
)))
1849 uh
= (struct udphdr
*)((uint8_t *)ip
+ hlen
);
1850 if (uh
->uh_sum
== 0)
1851 goto out
; /* no checksum */
1852 csum_flags
= M_CSUM_UDPv4
| M_CSUM_DATA
| M_CSUM_NO_PSEUDOHDR
;
1859 * The computed checksum includes Ethernet header, IP headers,
1860 * and CRC, so we have to deduct them.
1861 * Note IP header cksum should be 0xffff so we don't have to
1866 /* deduct Ethernet header */
1867 dp
= (const uint16_t *)eh
;
1868 for (hlen
= 0; hlen
< (ETHER_HDR_LEN
/ sizeof(uint16_t)); hlen
++)
1869 dsum
+= ntohs(*dp
++);
1873 dsum
+= (crc
>> 24) & 0x00ff;
1874 dsum
+= (crc
>> 8) & 0xffff;
1875 dsum
+= (crc
<< 8) & 0xff00;
1877 dsum
+= (crc
>> 16) & 0xffff;
1878 dsum
+= (crc
>> 0) & 0xffff;
1881 dsum
= (dsum
>> 16) + (dsum
& 0xffff);
1884 csum_data
+= (uint16_t)~dsum
;
1886 while (csum_data
>> 16)
1887 csum_data
= (csum_data
>> 16) + (csum_data
& 0xffff);
1890 m
->m_pkthdr
.csum_flags
= csum_flags
;
1891 m
->m_pkthdr
.csum_data
= csum_data
;
1895 mec_txintr(struct mec_softc
*sc
, uint32_t txptr
)
1897 struct ifnet
*ifp
= &sc
->sc_ethercom
.ec_if
;
1898 struct mec_txdesc
*txd
;
1899 struct mec_txsoft
*txs
;
1900 bus_dmamap_t dmamap
;
1905 DPRINTF(MEC_DEBUG_TXINTR
, ("%s: called\n", __func__
));
1907 for (i
= sc
->sc_txdirty
; i
!= txptr
&& sc
->sc_txpending
!= 0;
1908 i
= MEC_NEXTTX(i
), sc
->sc_txpending
--) {
1909 txd
= &sc
->sc_txdesc
[i
];
1911 MEC_TXCMDSYNC(sc
, i
,
1912 BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
1914 txstat
= txd
->txd_stat
;
1915 DPRINTF(MEC_DEBUG_TXINTR
,
1916 ("%s: dirty = %d, txstat = 0x%016llx\n",
1917 __func__
, i
, txstat
));
1918 if ((txstat
& MEC_TXSTAT_SENT
) == 0) {
1919 MEC_TXCMDSYNC(sc
, i
, BUS_DMASYNC_PREREAD
);
1923 txs
= &sc
->sc_txsoft
[i
];
1924 if ((txs
->txs_flags
& MEC_TXS_TXDPTR
) != 0) {
1925 dmamap
= txs
->txs_dmamap
;
1926 bus_dmamap_sync(sc
->sc_dmat
, dmamap
, 0,
1927 dmamap
->dm_mapsize
, BUS_DMASYNC_POSTWRITE
);
1928 bus_dmamap_unload(sc
->sc_dmat
, dmamap
);
1929 m_freem(txs
->txs_mbuf
);
1930 txs
->txs_mbuf
= NULL
;
1933 col
= (txstat
& MEC_TXSTAT_COLCNT
) >> MEC_TXSTAT_COLCNT_SHIFT
;
1934 ifp
->if_collisions
+= col
;
1936 if ((txstat
& MEC_TXSTAT_SUCCESS
) == 0) {
1937 printf("%s: TX error: txstat = 0x%016"PRIx64
"\n",
1938 device_xname(sc
->sc_dev
), txstat
);
1944 /* update the dirty TX buffer pointer */
1946 DPRINTF(MEC_DEBUG_INTR
,
1947 ("%s: sc_txdirty = %2d, sc_txpending = %2d\n",
1948 __func__
, sc
->sc_txdirty
, sc
->sc_txpending
));
1950 /* cancel the watchdog timer if there are no pending TX packets */
1951 if (sc
->sc_txpending
== 0)
1953 if (sc
->sc_txpending
< MEC_NTXDESC
- MEC_NTXDESC_RSVD
)
1954 ifp
->if_flags
&= ~IFF_OACTIVE
;
1958 mec_shutdown(device_t self
, int howto
)
1960 struct mec_softc
*sc
= device_private(self
);
1962 mec_stop(&sc
->sc_ethercom
.ec_if
, 1);
1963 /* make sure to stop DMA etc. */