1 /* $NetBSD: if_xe.c,v 1.18 2009/03/18 17:06:46 cegger Exp $ */
3 * Copyright (c) 1998 Darrin B. Jewell
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Darrin B. Jewell
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: if_xe.c,v 1.18 2009/03/18 17:06:46 cegger Exp $");
38 #include <sys/param.h>
39 #include <sys/systm.h>
41 #include <sys/syslog.h>
42 #include <sys/socket.h>
43 #include <sys/device.h>
46 #include <net/if_ether.h>
47 #include <net/if_media.h>
50 #include <netinet/in.h>
51 #include <netinet/if_inarp.h>
54 #include <machine/autoconf.h>
55 #include <machine/cpu.h>
56 #include <machine/intr.h>
57 #include <machine/bus.h>
59 #include <next68k/next68k/isr.h>
61 #include <next68k/dev/mb8795reg.h>
62 #include <next68k/dev/mb8795var.h>
64 #include <next68k/dev/bmapreg.h>
65 #include <next68k/dev/intiovar.h>
66 #include <next68k/dev/nextdmareg.h>
67 #include <next68k/dev/nextdmavar.h>
69 #include <next68k/dev/if_xevar.h>
70 #include <next68k/dev/if_xereg.h>
/*
 * Debug print: only emitted when xe_debug is set (xe_debug is defined
 * under XE_DEBUG elsewhere in this file).  Wrapped in do { } while (0)
 * so the macro acts as a single statement and cannot mis-bind an
 * enclosing if/else the way the old "if (xe_debug) printf x;" form could.
 */
#define DPRINTF(x) do { if (xe_debug) printf x; } while (0)
/* DMA trace buffer state, shared with the nextdma driver. */
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
/*
 * Record trace data while the 8192-byte trace buffer has room.
 * The vestigial always-true "10 &&" is dropped, and the body is wrapped
 * in do { } while (0) to remove the dangling-else hazard of the previous
 * "if (...) do {x;} while (0)" form.
 */
#define NDTRACEIF(x) do { if (ndtracep < (ndtrace + 8192)) { x; } } while (0)
#define PRINTF(x) printf x;
91 int xe_match(struct device
*, struct cfdata
*, void *);
92 void xe_attach(struct device
*, struct device
*, void *);
96 struct mbuf
* xe_dma_rxmap_load(struct mb8795_softc
*, bus_dmamap_t
);
98 bus_dmamap_t
xe_dma_rx_continue(void *);
99 void xe_dma_rx_completed(bus_dmamap_t
, void *);
100 bus_dmamap_t
xe_dma_tx_continue(void *);
101 void xe_dma_tx_completed(bus_dmamap_t
, void *);
102 void xe_dma_rx_shutdown(void *);
103 void xe_dma_tx_shutdown(void *);
105 static void findchannel_defer(struct device
*);
107 CFATTACH_DECL(xe
, sizeof(struct xe_softc
),
108 xe_match
, xe_attach
, NULL
, NULL
);
110 static int xe_dma_medias
[] = {
115 static int nxe_dma_medias
= (sizeof(xe_dma_medias
)/sizeof(xe_dma_medias
[0]));
117 static int attached
= 0;
120 * Functions and the switch for the MI code.
122 u_char
xe_read_reg(struct mb8795_softc
*, int);
123 void xe_write_reg(struct mb8795_softc
*, int, u_char
);
124 void xe_dma_reset(struct mb8795_softc
*);
125 void xe_dma_rx_setup(struct mb8795_softc
*);
126 void xe_dma_rx_go(struct mb8795_softc
*);
127 struct mbuf
* xe_dma_rx_mbuf(struct mb8795_softc
*);
128 void xe_dma_tx_setup(struct mb8795_softc
*);
129 void xe_dma_tx_go(struct mb8795_softc
*);
130 int xe_dma_tx_mbuf(struct mb8795_softc
*, struct mbuf
*);
131 int xe_dma_tx_isactive(struct mb8795_softc
*);
133 struct mb8795_glue xe_glue
= {
/*
 * Autoconf match routine for the on-board MB8795 Ethernet.
 * NOTE(review): several lines of this function (its braces, any
 * "already attached" guard, and its return value) are missing from
 * this view; only the visible statements are annotated.
 */
xe_match(struct device *parent, struct cfdata *match, void *aux)
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	/* The controller sits at a fixed physical address on the intio bus. */
	ia->ia_addr = (void *)NEXT_P_ENET;
/*
 * Deferred attachment: resolve the transmit/receive DMA channels,
 * wire up the nextdma callbacks, create the DMA maps and hook the
 * interrupt handlers.  Invoked directly or via config_defer() from
 * xe_attach() (see there) when the DMA channels were not available
 * at attach time.
 * NOTE(review): some lines (braces, error tests, and the declarations
 * of "error" and "i") are missing from this view; the visible
 * statements are reproduced unchanged.
 */
findchannel_defer(struct device *self)
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	/* Look up the named DMA channels; fatal if they cannot be found. */
	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel ("enetx");
		if (xsc->sc_txdma == NULL)
			panic ("%s: can't find enetx DMA channel",
			       sc->sc_dev.dv_xname);
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel ("enetr");
		if (xsc->sc_rxdma == NULL)
			panic ("%s: can't find enetr DMA channel",
			       sc->sc_dev.dv_xname);

	printf ("%s: using DMA channels %s %s\n", sc->sc_dev.dv_xname,
		xsc->sc_txdma->sc_dev.dv_xname, xsc->sc_rxdma->sc_dev.dv_xname);

	/* Register per-direction callbacks; the shared softc is the cb arg. */
	nextdma_setconf (xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf (xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf (xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf (xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf (xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf (xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf (xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf (xsc->sc_txdma, cb_arg, sc);

	/* Initialize the DMA maps */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
			(MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
		panic("%s: can't create tx DMA map, error = %d",
		      sc->sc_dev.dv_xname, error);

	/* One DMA map per receive ring slot; mbufs are loaded later. */
	for(i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
				(MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
				&xsc->sc_rx_dmamap[i]);
			panic("%s: can't create rx DMA map, error = %d",
			      sc->sc_dev.dv_xname, error);
		xsc->sc_rx_mb_head[i] = NULL;

	/* Reset the receive ring indices. */
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	/* @@@ more next hacks
	 * the 2000 covers at least a 1500 mtu + headers
	 * + DMA_BEGINALIGNMENT+ DMA_ENDALIGNMENT
	 */
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
		panic("%s: can't malloc tx DMA buffer", sc->sc_dev.dv_xname);

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	/* Hand the configured glue/media tables to the MI mb8795 driver. */
	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	/* Hook and enable the transmit and receive interrupts. */
	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
/*
 * Autoconf attach: map the MB8795 and bmap register windows, pick up
 * the MAC address saved by the boot ROM, and either finish setup now
 * (if both DMA channels already exist) or defer to findchannel_defer().
 * NOTE(review): braces and several surrounding lines (e.g. the copy
 * loop bounds for the MAC address) are missing from this view; the
 * visible statements are reproduced unchanged.
 */
xe_attach(struct device *parent, struct device *self, void *aux)
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	DPRINTF(("%s: xe_attach()\n",sc->sc_dev.dv_xname));

	/* Copy the MAC address the boot ROM left behind. */
	extern u_char rom_enetaddr[6]; /* kludge from machdep.c:next68k_bootargs() */
	sc->sc_enaddr[i] = rom_enetaddr[i];

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       sc->sc_enaddr[0],sc->sc_enaddr[1],sc->sc_enaddr[2],
	       sc->sc_enaddr[3],sc->sc_enaddr[4],sc->sc_enaddr[5]);

	/* Map the MB8795 register window. */
	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
			  XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers",
		      sc->sc_dev.dv_xname);

	/* Map the bmap register window. */
	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
			  BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
		panic("\n%s: can't map bmap registers",
		      sc->sc_dev.dv_xname);

	/*
	 * Set up glue for MI code.
	 */
	sc->sc_glue = &xe_glue;

	/* Finish immediately if both DMA channels exist; otherwise defer. */
	xsc->sc_txdma = nextdma_findchannel ("enetx");
	xsc->sc_rxdma = nextdma_findchannel ("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma) {
		findchannel_defer (self);
		config_defer (self, findchannel_defer);
/*
 * NOTE(review): the lines below are fragments of the xe_tint() and
 * xe_rint() interrupt handlers; their function headers and return
 * statements are missing from this view.
 */
	/* Transmit interrupt: bail if not ours, else hand to the MI driver. */
	if (!INTR_OCCURRED(NEXT_I_ENETX))
	mb8795_tint((struct mb8795_softc *)arg);
	/* Receive interrupt: bail if not ours, else hand to the MI driver. */
	if (!INTR_OCCURRED(NEXT_I_ENETR))
	mb8795_rint((struct mb8795_softc *)arg);
308 xe_read_reg(struct mb8795_softc
*sc
, int reg
)
310 struct xe_softc
*xsc
= (struct xe_softc
*)sc
;
312 return(bus_space_read_1(xsc
->sc_bst
, xsc
->sc_bsh
, reg
));
316 xe_write_reg(struct mb8795_softc
*sc
, int reg
, u_char val
)
318 struct xe_softc
*xsc
= (struct xe_softc
*)sc
;
320 bus_space_write_1(xsc
->sc_bst
, xsc
->sc_bsh
, reg
, val
);
/*
 * Glue routine: reset both DMA channels and release any mbufs and DMA
 * maps still loaded for transmit or receive.
 * NOTE(review): braces and the declaration of "i" are missing from
 * this view; the visible statements are reproduced unchanged.
 */
xe_dma_reset(struct mb8795_softc *sc)
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	/* Tear down a pending transmit: sync, unload, then drop the chain. */
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

	/* Unload and free every receive buffer in the ring. */
	for(i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
/*
 * Glue routine: load a fresh mbuf into each receive DMA map, reset
 * the ring indices and initialize the receive DMA channel.
 * NOTE(review): braces and the declaration of "i" are missing from
 * this view; the visible statements are reproduced unchanged.
 */
xe_dma_rx_setup(struct mb8795_softc *sc)
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA rx setup\n"));

	/* Populate every ring slot with a freshly loaded mbuf. */
	for(i = 0; i < MB8795_NRXBUFS; i++) {
		xsc->sc_rx_mb_head[i] =
			xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);

	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	nextdma_init(xsc->sc_rxdma);
375 xe_dma_rx_go(struct mb8795_softc
*sc
)
377 struct xe_softc
*xsc
= (struct xe_softc
*)sc
;
379 DPRINTF(("xe DMA rx go\n"));
381 nextdma_start(xsc
->sc_rxdma
, DMACSR_SETREAD
);
/*
 * Glue routine: if a completed receive buffer is waiting, detach its
 * mbuf from the ring, sync/unload its DMA map and install a freshly
 * loaded mbuf in its place.
 * NOTE(review): the declarations of "map" and "m", the function's
 * return statement(s) and several braces are missing from this view;
 * the visible statements are reproduced unchanged.
 */
xe_dma_rx_mbuf(struct mb8795_softc *sc)
	struct xe_softc *xsc = (struct xe_softc *)sc;

	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		/* Advance the handled index around the ring. */
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		/* The DMA engine recorded the actual transfer length. */
		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
				0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for next packet */

		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
			xe_dma_rxmap_load(sc,map);

		/*
		 * DMA restarts create 0 length packets for example
		 */
		if (m->m_len < ETHER_MIN_LEN) {
423 xe_dma_tx_setup(struct mb8795_softc
*sc
)
425 struct xe_softc
*xsc
= (struct xe_softc
*)sc
;
427 DPRINTF(("xe DMA tx setup\n"));
429 nextdma_init(xsc
->sc_txdma
);
433 xe_dma_tx_go(struct mb8795_softc
*sc
)
435 struct xe_softc
*xsc
= (struct xe_softc
*)sc
;
437 DPRINTF(("xe DMA tx go\n"));
439 nextdma_start(xsc
->sc_txdma
, DMACSR_SETWRITE
);
/*
 * Glue routine: stage an outgoing mbuf chain for DMA.  First tries to
 * load the chain directly; the visible fallback path copies the chain
 * into the driver's bounce buffer (sc_txbuf), pads runt frames, and
 * loads that buffer instead.
 * NOTE(review): many lines (declarations of "error"/"buflen"/"p",
 * error tests, the memset that pads the runt, returns and braces) are
 * missing from this view; the visible statements are reproduced
 * unchanged.
 */
xe_dma_tx_mbuf(struct mb8795_softc *sc, struct mbuf *m)
	struct xe_softc *xsc = (struct xe_softc *)sc;

	xsc->sc_tx_mb_head = m;

/* The following is a next specific hack that should
 * probably be moved out of MI code.
 * This macro assumes it can move forward as needed
 * in the buffer. Perhaps it should zero the extra buffer.
 */
#define REALIGN_DMABUF(s,l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
			&~(DMA_BEGINALIGNMENT-1))); \
	(l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
			&~(DMA_ENDALIGNMENT-1)))-(s);}

	/* Try to map the chain in place. */
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
				     xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);

		/* Fallback: flatten the chain into the bounce buffer. */
		u_char *buf = xsc->sc_txbuf;

		buflen = m->m_pkthdr.len;

		for (m=xsc->sc_tx_mb_head; m; m = m->m_next) {
			if (m->m_len == 0) continue;
			memcpy(p, mtod(m, u_char *), m->m_len);

		/* Fix runt packets */
		if (buflen < ETHER_MIN_LEN - ETHER_CRC_LEN) {
				ETHER_MIN_LEN - ETHER_CRC_LEN - buflen);
			buflen = ETHER_MIN_LEN - ETHER_CRC_LEN;

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
					buf,buflen,NULL,BUS_DMA_NOWAIT);

		printf("%s: can't load mbuf chain, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,

	/* Make the staged buffer visible to the DMA engine before start. */
	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
			xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
512 xe_dma_tx_isactive(struct mb8795_softc
*sc
)
514 struct xe_softc
*xsc
= (struct xe_softc
*)sc
;
516 return (xsc
->sc_tx_loaded
!= 0);
519 /****************************************************************/
/*
 * DMA completion callback for transmit.  Under XE_DEBUG/DIAGNOSTIC it
 * sanity-checks that a transmit was actually loaded and that the
 * completing map is this driver's transmit map.
 * NOTE(review): braces, the matching #endif lines and the rest of the
 * body are missing from this view; the visible statements are
 * reproduced unchanged.
 */
xe_dma_tx_completed(bus_dmamap_t map, void *arg)
#if defined (XE_DEBUG) || defined (DIAGNOSTIC)
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("%s: xe_dma_tx_completed()\n",sc->sc_dev.dv_xname));

	if (!xsc->sc_tx_loaded) {
		panic("%s: tx completed never loaded",sc->sc_dev.dv_xname);
	if (map != xsc->sc_tx_dmamap) {
		panic("%s: unexpected tx completed map",sc->sc_dev.dv_xname);
/*
 * DMA shutdown callback for transmit: unload and free the finished
 * chain, restart queued output if the interface is running, and
 * re-enable the transmit-ready interrupt.
 * NOTE(review): braces, diagnostic #if lines and parts of the panic
 * arguments are missing from this view; the visible statements are
 * reproduced unchanged.
 */
xe_dma_tx_shutdown(void *arg)
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n",sc->sc_dev.dv_xname));

	if (!xsc->sc_tx_loaded) {
		panic("%s: tx shutdown never loaded",sc->sc_dev.dv_xname);

	MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	/* Release the completed transmit: sync, unload, free the chain. */
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
				0, xsc->sc_tx_dmamap->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

	if (xsc->sc_tx_loaded != 0) {
		panic("%s: sc->sc_tx_loaded is %d",sc->sc_dev.dv_xname,

	/* More output queued and interface up: start the next transmit. */
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma(struct mb8795_softc *); /* XXXX */
		mb8795_start_dma(sc);

	/* Enable ready interrupt */
	MB_WRITE_REG(sc, MB8795_TXMASK,
		     MB_READ_REG(sc, MB8795_TXMASK)
		     | MB8795_TXMASK_TXRXIE/* READYIE */);
/*
 * DMA completion callback for receive: advance the completed ring
 * index; under DIAGNOSTIC verify the completing map matches the ring
 * slot.
 * NOTE(review): braces, the matching #endif and the else arm around
 * the final DPRINTF are missing from this view; the visible
 * statements are reproduced unchanged.
 */
xe_dma_rx_completed(bus_dmamap_t map, void *arg)
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Advance the completed index around the ring. */
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), sc->sc_rx_completed_idx = %d\n",
			 sc->sc_dev.dv_xname, xsc->sc_rx_completed_idx));

#if (defined(DIAGNOSTIC))
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
			panic("%s: Unexpected rx dmamap completed",
			      sc->sc_dev.dv_xname);

		DPRINTF(("%s: Unexpected rx dmamap completed while if not running\n",
			 sc->sc_dev.dv_xname));
/*
 * DMA shutdown callback for receive: if the interface is still
 * running, restart the receive channel and rewrite the receive mode
 * register.
 * NOTE(review): braces and the else arm around the final DPRINTF are
 * missing from this view; the visible statements are reproduced
 * unchanged.
 */
xe_dma_rx_shutdown(void *arg)
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
			 sc->sc_dev.dv_xname));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);

		MB_WRITE_REG(sc, MB8795_RXMODE, MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);

		DPRINTF(("%s: Unexpected rx DMA shutdown while if not running\n",
			 sc->sc_dev.dv_xname));
/*
 * load a dmamap with a freshly allocated mbuf
 *
 * Allocates an mbuf cluster, aligns its data area for the NeXT DMA
 * engine, loads it into the given map and pre-syncs it for reading.
 * NOTE(review): the declarations of "m" and "error", several braces,
 * error tests and the function's return are missing from this view;
 * the visible statements are reproduced unchanged.
 */
xe_dma_rxmap_load(struct mb8795_softc *sc, bus_dmamap_t map)
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {

	/* @@@ Handle this gracefully by reusing a scratch buffer */
	panic("Unable to get memory for incoming ethernet");

	/* Align buffer, @@@ next specific.
	 * perhaps should be using M_ALIGN here instead?
	 * First we give us a little room to align with.
	 */
	u_char *buf = m->m_data;
	int buflen = m->m_len;
	buflen -= DMA_ENDALIGNMENT+DMA_BEGINALIGNMENT;
	REALIGN_DMABUF(buf, buflen);

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len;

	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
				     map, m, BUS_DMA_NOWAIT);

	/* Make the buffer ready for the device to fill. */
	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
			map->dm_mapsize, BUS_DMASYNC_PREREAD);

	DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
		 m->m_data, m->m_len));
	DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
		 MCLBYTES, map->_dm_size));

	panic("%s: can't load rx mbuf chain, error = %d",
	      sc->sc_dev.dv_xname, error);
/*
 * DMA "continue" callback for receive: hand the engine the next
 * receive map in the ring, dropping the oldest pending packet when
 * the ring is full.
 * NOTE(review): braces, #else/#endif lines and the function's return
 * of "map" are missing from this view; the visible statements are
 * reproduced unchanged (including the "\nn" typo in the DPRINTF
 * format string).
 */
xe_dma_rx_continue(void *arg)
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Ring full when loaded+1 catches up with handled. */
		if (((xsc->sc_rx_loaded_idx+1)%MB8795_NRXBUFS) == xsc->sc_rx_handled_idx) {
			/* make space for one packet by dropping one */

			m = xe_dma_rx_mbuf (sc);
#if (defined(DIAGNOSTIC))
			DPRINTF(("%s: out of receive DMA buffers\n",sc->sc_dev.dv_xname));

		/* Advance the loaded index and pick that slot's map. */
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx = %d\nn",
			 sc->sc_dev.dv_xname,xsc->sc_rx_loaded_idx));

		panic("%s: Unexpected rx DMA continue while if not running",
		      sc->sc_dev.dv_xname);
748 xe_dma_tx_continue(void *arg
)
750 struct mb8795_softc
*sc
= arg
;
751 struct xe_softc
*xsc
= (struct xe_softc
*)sc
;
754 DPRINTF(("%s: xe_dma_tx_continue()\n",sc
->sc_dev
.dv_xname
));
756 if (xsc
->sc_tx_loaded
) {
759 map
= xsc
->sc_tx_dmamap
;
764 if (xsc
->sc_tx_loaded
!= 1) {
765 panic("%s: sc->sc_tx_loaded is %d",sc
->sc_dev
.dv_xname
,