/* $NetBSD: nextdma.c,v 1.44 2008/12/17 17:12:52 cegger Exp $ */
/*
 * Copyright (c) 1998 Darrin B. Jewell
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nextdma.c,v 1.44 2008/12/17 17:12:52 cegger Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#define _M68K_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>
#include <next68k/next68k/nextrom.h>

#include <next68k/dev/intiovar.h>

#include "nextdmareg.h"
#include "nextdmavar.h"
/*
 * Debugging hack: this "panic" first executes "trap #15" (dropping into the
 * NeXT ROM monitor) and then falls through to printf.
 */
#define panic		__asm volatile("trap #15"); printf
#define NEXTDMA_DEBUG nextdma_debug
/* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && nextdma_debug */
int nextdma_debug = 0;
#define DPRINTF(x) if (NEXTDMA_DEBUG) printf x;

char ndtrace[8192+100];
char *ndtracep = ndtrace;
#define NDTRACEIF(x) if (10 && /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && */ ndtracep < (ndtrace + 8192)) do {x;} while (0)
#define PRINTF(x) printf x
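
/*
 * Usage sketch for the debug macros above (illustrative only): DPRINTF
 * takes a fully parenthesized printf argument list, and NDTRACEIF appends
 * single characters or short strings to the ndtrace buffer for post-mortem
 * inspection, e.g.
 *
 *	DPRINTF(("DMA: csr = 0x%08x\n", state));
 *	NDTRACEIF(*ndtracep++ = 'x');
 */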
int nextdma_debug_enetr_idx = 0;
unsigned int nextdma_debug_enetr_state[100] = { 0 };
int nextdma_debug_scsi_idx = 0;
unsigned int nextdma_debug_scsi_state[100] = { 0 };

void nextdma_debug_initstate(struct nextdma_softc *);
void nextdma_debug_savestate(struct nextdma_softc *, unsigned int);
void nextdma_debug_scsi_dumpstate(void);
void nextdma_debug_enetr_dumpstate(void);
int	nextdma_match(struct device *, struct cfdata *, void *);
void	nextdma_attach(struct device *, struct device *, void *);

void	nextdmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int	nextdma_continue(struct nextdma_softc *);
void	nextdma_rotate(struct nextdma_softc *);

void	nextdma_setup_cont_regs(struct nextdma_softc *);
void	nextdma_setup_curr_regs(struct nextdma_softc *);

void	nextdma_print(struct nextdma_softc *);

static int	nextdma_esp_intr(void *);
static int	nextdma_enet_intr(void *);
#define nd_bsr4(reg) \
	bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
#define nd_bsw4(reg,val) \
	bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))
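
/*
 * Note: both accessor macros assume a variable named "nsc" (a struct
 * nextdma_softc *) is in scope at the point of use.  For example,
 *
 *	nd_bsw4(DD_CSR, DMACSR_RESET);
 *
 * expands to
 *
 *	bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (DD_CSR), (DMACSR_RESET));
 */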
CFATTACH_DECL(nextdma, sizeof(struct nextdma_softc),
    nextdma_match, nextdma_attach, NULL, NULL);
static struct nextdma_channel nextdma_channel[] = {
	{ "scsi", NEXT_P_SCSI_CSR, DD_SIZE, NEXT_I_SCSI_DMA, &nextdma_esp_intr },
	{ "enetx", NEXT_P_ENETX_CSR, DD_SIZE, NEXT_I_ENETX_DMA, &nextdma_enet_intr },
	{ "enetr", NEXT_P_ENETR_CSR, DD_SIZE, NEXT_I_ENETR_DMA, &nextdma_enet_intr },
};

static int nnextdma_channels = (sizeof(nextdma_channel)/sizeof(nextdma_channel[0]));

static int attached = 0;
struct nextdma_softc *
nextdma_findchannel(const char *name)
{
	struct device *dev;
	deviter_t di;

	for (dev = deviter_first(&di, DEVITER_F_ROOT_FIRST);
	     dev != NULL;
	     dev = deviter_next(&di)) {
		if (strncmp(dev->dv_xname, "nextdma", 7) == 0) {
			struct nextdma_softc *nsc = device_private(dev);

			if (strcmp(nsc->sc_chan->nd_name, name) == 0)
				break;
		}
	}
	deviter_release(&di);

	if (dev == NULL)
		return NULL;

	return device_private(dev);
}
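
/*
 * Minimal usage sketch (the my_* callback names below are hypothetical
 * placeholders; sc_conf and the nextdma_* entry points are those declared
 * in nextdmavar.h).  A client driver would typically do something like:
 *
 *	struct nextdma_softc *nsc;
 *
 *	nsc = nextdma_findchannel("enetx");
 *	if (nsc == NULL)
 *		return;
 *	nsc->sc_conf.nd_continue_cb  = my_continue_cb;
 *	nsc->sc_conf.nd_completed_cb = my_completed_cb;
 *	nsc->sc_conf.nd_shutdown_cb  = my_shutdown_cb;
 *	nsc->sc_conf.nd_cb_arg       = sc;
 *	nextdma_init(nsc);
 */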
int
nextdma_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached >= nnextdma_channels)
		return (0);

	ia->ia_addr = (void *)nextdma_channel[attached].nd_base;

	return (1);
}
void
nextdma_attach(struct device *parent, struct device *self, void *aux)
{
	struct nextdma_softc *nsc = (struct nextdma_softc *)self;
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached >= nnextdma_channels)
		return;

	nsc->sc_chan = &nextdma_channel[attached];
	nsc->sc_dmat = ia->ia_dmat;
	nsc->sc_bst = ia->ia_bst;

	if (bus_space_map(nsc->sc_bst, nsc->sc_chan->nd_base,
	    nsc->sc_chan->nd_size, 0, &nsc->sc_bsh)) {
		panic("%s: can't map DMA registers for channel %s",
		    nsc->sc_dev.dv_xname, nsc->sc_chan->nd_name);
	}

	isrlink_autovec(nsc->sc_chan->nd_intrfunc, nsc,
	    NEXT_I_IPL(nsc->sc_chan->nd_intr), 10, NULL);
	INTR_ENABLE(nsc->sc_chan->nd_intr);

	printf(": channel %d (%s)\n", attached,
	    nsc->sc_chan->nd_name);
	attached++;
}
void
nextdma_init(struct nextdma_softc *nsc)
{
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}

	nsc->sc_stat.nd_map = NULL;
	nsc->sc_stat.nd_idx = 0;
	nsc->sc_stat.nd_map_cont = NULL;
	nsc->sc_stat.nd_idx_cont = 0;
	nsc->sc_stat.nd_exception = 0;

	nd_bsw4 (DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);

	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);

#if defined(DIAGNOSTIC)
	{
		u_long state;

		state = nd_bsr4 (DD_CSR);

		/* mourning (a 25 MHz 68040 mono slab) appears to set BUSEXC
		 * milo (a 25 MHz 68040 mono cube) didn't have this problem
		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
		 */
#if 1
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
		    DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		if (state) {
			panic("DMA did not reset");
		}
	}
#endif
}
void
nextdma_reset(struct nextdma_softc *nsc)
{
	struct nextdma_status *stat = &nsc->sc_stat;

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 1) nextdma_print(nsc);
#endif

	nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
	if ((stat->nd_map) || (stat->nd_map_cont)) {
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: resetting with non null continue map\n"));
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
		stat->nd_map = 0;
		stat->nd_idx = 0;
	}
}
/****************************************************************/

/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 */
void
nextdma_rotate(struct nextdma_softc *nsc)
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'r');
	DPRINTF(("DMA nextdma_rotate()\n"));

	/* Rotate the continue map into the current map */
	stat->nd_map = stat->nd_map_cont;
	stat->nd_idx = stat->nd_idx_cont;

	if ((!stat->nd_map_cont) ||
	    ((++stat->nd_idx_cont >= stat->nd_map_cont->dm_nsegs))) {
		if (nsc->sc_conf.nd_continue_cb) {
			stat->nd_map_cont = (*nsc->sc_conf.nd_continue_cb)
				(nsc->sc_conf.nd_cb_arg);
			if (stat->nd_map_cont) {
				stat->nd_map_cont->dm_xfer_len = 0;
			}
		} else {
			stat->nd_map_cont = 0;
		}
		stat->nd_idx_cont = 0;
	}

#if defined(DIAGNOSTIC) && 0
	if (stat->nd_map_cont) {
		if (!DMA_BEGINALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr)) {
			panic("DMA request unaligned at start");
		}
		if (!DMA_ENDALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len)) {
			panic("DMA request unaligned at end");
		}
	}
#endif
}
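
/*
 * Rough picture of the double buffering done above (illustrative comment,
 * not code): the hardware always has a "current" and a "continue" segment
 * programmed.  Each time a segment completes, nextdma_rotate() promotes the
 * continue segment to current and asks the client for more work through
 * nd_continue_cb, e.g.
 *
 *	current:  map A, segment 0   (being transferred)
 *	continue: map A, segment 1   (already loaded in DD_START/DD_STOP)
 *
 * and after one rotation
 *
 *	current:  map A, segment 1
 *	continue: map B, segment 0   (returned by nd_continue_cb, xfer_len reset)
 */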
void
nextdma_setup_curr_regs(struct nextdma_softc *nsc)
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'C');
	DPRINTF(("DMA nextdma_setup_curr_regs()\n"));

	if (stat->nd_map) {
		dd_next = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
		dd_limit = (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
		    stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
		}
	} else {
		dd_next = turbo ? 0 : 0xdeadbeef;
		dd_limit = turbo ? 0 : 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	NDTRACEIF (if (stat->nd_map) {
		sprintf (ndtracep, "%ld", stat->nd_map->dm_segs[stat->nd_idx].ds_len);
		ndtracep += strlen (ndtracep);
	});

	if (!turbo && (nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA)) {
		nd_bsw4 (DD_NEXT_INITBUF, dd_next);
	} else {
		nd_bsw4 (DD_NEXT, dd_next);
	}
	nd_bsw4 (DD_LIMIT, dd_limit);
	if (!turbo) nd_bsw4 (DD_SAVED_NEXT, dd_saved_next);
	if (!turbo) nd_bsw4 (DD_SAVED_LIMIT, dd_saved_limit);

	if ((nd_bsr4 (DD_NEXT_INITBUF) != dd_next)
	    || (nd_bsr4 (DD_NEXT) != dd_next)
	    || (nd_bsr4 (DD_LIMIT) != dd_limit)
	    || (!turbo && (nd_bsr4 (DD_SAVED_NEXT) != dd_saved_next))
	    || (!turbo && (nd_bsr4 (DD_SAVED_LIMIT) != dd_saved_limit))
	    ) {
		panic("DMA failure writing to current regs");
	}
}
void
nextdma_setup_cont_regs(struct nextdma_softc *nsc)
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'c');
	DPRINTF(("DMA nextdma_setup_regs()\n"));

	if (stat->nd_map_cont) {
		dd_start = stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr;
		dd_stop = (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
		}
	} else {
		dd_start = turbo ? nd_bsr4 (DD_NEXT) : 0xdeadbee0;
		dd_stop = turbo ? 0 : 0xdeadbee0;
	}

	dd_saved_start = dd_start;
	dd_saved_stop = dd_stop;

	NDTRACEIF (if (stat->nd_map_cont) {
		sprintf (ndtracep, "%ld", stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		ndtracep += strlen (ndtracep);
	});

	nd_bsw4 (DD_START, dd_start);
	nd_bsw4 (DD_STOP, dd_stop);
	if (!turbo) nd_bsw4 (DD_SAVED_START, dd_saved_start);
	if (!turbo) nd_bsw4 (DD_SAVED_STOP, dd_saved_stop);
	if (turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETR_DMA)
		nd_bsw4 (DD_STOP - 0x40, dd_start);

	if ((nd_bsr4 (DD_START) != dd_start)
	    || (dd_stop && (nd_bsr4 (DD_STOP) != dd_stop))
	    || (!turbo && (nd_bsr4 (DD_SAVED_START) != dd_saved_start))
	    || (!turbo && (nd_bsr4 (DD_SAVED_STOP) != dd_saved_stop))
	    ) {
		panic("DMA failure writing to continue regs");
	}
}
/****************************************************************/
static int
nextdma_esp_intr(void *arg)
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	int esp_dma_int(void *); /* XXX */

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	return esp_dma_int (nsc->sc_conf.nd_cb_arg);
}
static int
nextdma_enet_intr(void *arg)
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	bus_addr_t onext;
	bus_addr_t olimit;
	bus_addr_t slimit;
	int result;
	unsigned int state;
	struct nextdma_status *stat = &nsc->sc_stat;

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	NDTRACEIF (*ndtracep++ = 'D');

	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}

	if (stat->nd_map == NULL) {
		panic("DMA missing current map in interrupt!");
	}
	state = nd_bsr4 (DD_CSR);

#if defined(ND_DEBUG)
	nextdma_debug_savestate(nsc, state);
#endif

	if (/* (state & DMACSR_READ) || */ !(state & DMACSR_COMPLETE)) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n", sbuf);
		panic("DMA complete not set in interrupt");
	}

	DPRINTF(("DMA: finishing xfer\n"));
	onext = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
	olimit = onext + stat->nd_map->dm_segs[stat->nd_idx].ds_len;
	result = 0;
	if (state & DMACSR_ENABLE) {
		/* enable bit was set */
		result |= 0x01;
	}
	if (state & DMACSR_SUPDATE) {
		/* supdate bit was set */
		result |= 0x02;
	}
	if (stat->nd_map_cont == NULL) {
		KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
		/* Expecting a shutdown, didn't SETSUPDATE last turn */
		result |= 0x04;
	}
	if (state & DMACSR_BUSEXC) {
		/* bus exception bit was set */
		result |= 0x08;
	}

	switch (result) {
	case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
	case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
		if (turbo) {
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
	case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
		if (turbo) {
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
	case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_NEXT);
		break;
	case 0x04: /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
	case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_LIMIT);
		break;
	default:
		{
			char sbuf[256];

			printf("DMA: please send this output to port-next68k-maintainer@NetBSD.org:\n");
			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
			printf("DMA: state 0x%s\n", sbuf);
			panic("DMA: condition 0x%02x not yet documented to occur", result);
		}
		break;
	}
	if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
	}

	if ((state & DMACSR_READ))
		DPRINTF (("limits: 0x%08lx <= 0x%08lx <= 0x%08lx %s\n", onext, slimit, olimit,
		    (state & DMACSR_READ) ? "read" : "write"));
	if ((slimit < onext) || (slimit > olimit)) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n", sbuf);
		panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer", slimit);
	}

	if ((state & DMACSR_ENABLE) && ((stat->nd_idx+1) != stat->nd_map->dm_nsegs)) {
		if (slimit != olimit) {
			char sbuf[256];

			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
			printf("DMA: state 0x%s\n", sbuf);
			panic("DMA: short limit register (0x%08lx) w/o finishing map.", slimit);
		}
	}
#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	stat->nd_map->dm_xfer_len += slimit-onext;

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
		if (nsc->sc_conf.nd_completed_cb)
			(*nsc->sc_conf.nd_completed_cb)
				(stat->nd_map, nsc->sc_conf.nd_cb_arg);
	} else {
		KASSERT(stat->nd_map == stat->nd_map_cont);
		KASSERT(stat->nd_idx+1 == stat->nd_idx_cont);
	}
#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("CLNDMAP: dd->dd_csr = 0x%s\n", sbuf);
	}
#endif
	if (state & DMACSR_ENABLE) {
		u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

		nextdma_rotate(nsc);
		nextdma_setup_cont_regs(nsc);

		if (state & DMACSR_READ) {
			dmadir = DMACSR_SETREAD;
		} else {
			dmadir = DMACSR_SETWRITE;
		}

		if (stat->nd_map_cont == NULL) {
			KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir);
			NDTRACEIF (*ndtracep++ = 'g');
		} else {
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
			NDTRACEIF (*ndtracep++ = 'G');
		}
	} else {
		DPRINTF(("DMA: a shutdown occurred\n"));
		nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
		/* Cleanup more incomplete transfers */
		/* cleanup continue map */
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: shutting down with non null continue map\n"));
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
		stat->nd_map = 0;
		stat->nd_idx = 0;
	}
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}

	return 1;
}
/*
 * Check to see if dma has finished for a channel */
int
nextdma_finished(struct nextdma_softc *nsc)
{
	int r;
	struct nextdma_status *stat = &nsc->sc_stat;

	r = (stat->nd_map == NULL) && (stat->nd_map_cont == NULL);

	return r;
}
void
nextdma_start(struct nextdma_softc *nsc, u_long dmadir)
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'n');

	if (!nextdma_finished(nsc)) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		panic("DMA trying to start before previous finished on intr(0x%s)", sbuf);
	}

	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA start (%ld) intr(0x%s)\n",
		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}

	if (stat->nd_map) {
		panic("DMA: nextdma_start() with non null map");
	}
	if (stat->nd_map_cont) {
		panic("DMA: nextdma_start() with non null continue map");
	}

	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE");
	}

#if defined(ND_DEBUG)
	nextdma_debug_initstate(nsc);
#endif

	/* preload both the current and the continue maps */
	nextdma_rotate(nsc);

	if (!stat->nd_map_cont) {
		panic("No map available in nextdma_start()");
	}

	nextdma_rotate(nsc);

	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
		    (dmadir == DMACSR_SETREAD ? "read" : "write"), stat->nd_map->dm_nsegs, sbuf);
	}

	nd_bsw4 (DD_CSR, (turbo ? DMACSR_INITBUFTURBO : DMACSR_INITBUF) |
	    DMACSR_RESET | dmadir);

	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	if (stat->nd_map_cont == NULL) {
		nd_bsw4 (DD_CSR, DMACSR_SETENABLE | dmadir);
	} else {
		nd_bsw4 (DD_CSR, DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}
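
/*
 * Typical start sequence as seen from a client driver (sketch only; the
 * map-loading details belong to the client, while the nextdma_* calls and
 * the DMACSR_SETREAD/DMACSR_SETWRITE arguments come from this file):
 *
 *	if (nextdma_finished(nsc)) {
 *		(make the first bus_dmamap available to nd_continue_cb)
 *		nextdma_start(nsc, DMACSR_SETREAD);
 *	}
 *
 * nextdma_start() pulls maps in through nd_continue_cb, programs the
 * current and continue registers, and sets ENABLE (plus SUPDATE when a
 * continue map is already queued).
 */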
/* This routine is used for debugging */
void
nextdma_print(struct nextdma_softc *nsc)
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];
	int i;
	struct nextdma_status *stat = &nsc->sc_stat;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = nd_bsr4 (DD_CSR);
	dd_next         = nd_bsr4 (DD_NEXT);
	dd_next_initbuf = nd_bsr4 (DD_NEXT_INITBUF);
	dd_limit        = nd_bsr4 (DD_LIMIT);
	dd_start        = nd_bsr4 (DD_START);
	dd_stop         = nd_bsr4 (DD_STOP);
	dd_saved_next   = nd_bsr4 (DD_SAVED_NEXT);
	dd_saved_limit  = nd_bsr4 (DD_SAVED_LIMIT);
	dd_saved_start  = nd_bsr4 (DD_SAVED_START);
	dd_saved_stop   = nd_bsr4 (DD_SAVED_STOP);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    *(volatile u_long *)IIOV(NEXT_P_INTRSTAT));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    *(volatile u_long *)IIOV(NEXT_P_INTRMASK));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	if (stat->nd_map) {
		printf("NDMAP: nd_map->dm_mapsize = %ld\n",
		    stat->nd_map->dm_mapsize);
		printf("NDMAP: nd_map->dm_nsegs = %d\n",
		    stat->nd_map->dm_nsegs);
		printf("NDMAP: nd_map->dm_xfer_len = %ld\n",
		    stat->nd_map->dm_xfer_len);
		printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
		    stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
		printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
		    stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		printf("NDMAP: Entire map;\n");
		for (i = 0; i < stat->nd_map->dm_nsegs; i++) {
			printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
			    i, stat->nd_map->dm_segs[i].ds_addr);
			printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
			    i, stat->nd_map->dm_segs[i].ds_len);
		}
	} else {
		printf("NDMAP: nd_map = NULL\n");
	}
	if (stat->nd_map_cont) {
		printf("NDMAP: nd_map_cont->dm_mapsize = %ld\n",
		    stat->nd_map_cont->dm_mapsize);
		printf("NDMAP: nd_map_cont->dm_nsegs = %d\n",
		    stat->nd_map_cont->dm_nsegs);
		printf("NDMAP: nd_map_cont->dm_xfer_len = %ld\n",
		    stat->nd_map_cont->dm_xfer_len);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
		    stat->nd_idx_cont, stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
		    stat->nd_idx_cont, stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		if (stat->nd_map_cont != stat->nd_map) {
			printf("NDMAP: Entire map;\n");
			for (i = 0; i < stat->nd_map_cont->dm_nsegs; i++) {
				printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				    i, stat->nd_map_cont->dm_segs[i].ds_addr);
				printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
				    i, stat->nd_map_cont->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd_map_cont = NULL\n");
	}

	snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, dd_csr);
	printf("NDMAP: dd->dd_csr = 0x%s\n", sbuf);

	printf("NDMAP: dd->dd_saved_next = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop = 0x%08lx\n", dd_stop);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    (NEXT_I_BIT(nsc->sc_chan->nd_intr)));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
	    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
}
#if defined(ND_DEBUG)
void
nextdma_debug_initstate(struct nextdma_softc *nsc)
{
	switch(nsc->sc_chan->nd_intr) {
	case NEXT_I_ENETR_DMA:
		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
		break;
	case NEXT_I_SCSI_DMA:
		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
		break;
	}
}

void
nextdma_debug_savestate(struct nextdma_softc *nsc, unsigned int state)
{
	switch(nsc->sc_chan->nd_intr) {
	case NEXT_I_ENETR_DMA:
		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
		break;
	case NEXT_I_SCSI_DMA:
		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
		break;
	}
}

void
nextdma_debug_enetr_dumpstate(void)
{
	int i;
	char sbuf[256];

	i = nextdma_debug_enetr_idx;
	do {
		if (nextdma_debug_enetr_state[i]) {
			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, nextdma_debug_enetr_state[i]);
			printf("DMA: 0x%02x state 0x%s\n", i, sbuf);
		}
		i++;
		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
	} while (i != nextdma_debug_enetr_idx);
}

void
nextdma_debug_scsi_dumpstate(void)
{
	int i;
	char sbuf[256];

	i = nextdma_debug_scsi_idx;
	do {
		if (nextdma_debug_scsi_state[i]) {
			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, nextdma_debug_scsi_state[i]);
			printf("DMA: 0x%02x state 0x%s\n", i, sbuf);
		}
		i++;
		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
	} while (i != nextdma_debug_scsi_idx);
}
#endif