/*	$NetBSD: dma.c,v 1.41 2008/06/15 07:15:30 tsutsui Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.41 2008/06/15 07:15:30 tsutsui Exp $");

#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/kernel.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <m68k/cacheops.h>

#include <hp300/dev/intiovar.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>
/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/PAGE_SIZE+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/PAGE_SIZE) and the
 * buffer is not page aligned (+1).
 */
#define DMAMAXIO	(MAXPHYS/PAGE_SIZE+1)
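/*
 * For example, with the values commonly used on hp300 (MAXPHYS of 64KB
 * and a 4KB PAGE_SIZE -- see the port's param.h for the authoritative
 * numbers), DMAMAXIO works out to 16 + 1 = 17 chain elements per channel.
 */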
struct dma_chain {
	int	dc_count;
	char	*dc_addr;
};

struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};
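/*
 * A channel works through dm_chain[] one element at a time: dmago()
 * builds the chain and programs the first element, and dmaintr() then
 * advances dm_cur until it passes dm_last, at which point dmastop()
 * runs the initiator's completion callback.
 */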
struct dma_softc {
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;

	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	struct	callout sc_debug_ch;
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
};
/* flags */
#define DMAF_PCFLUSH	0x01
#define DMAF_VCFLUSH	0x02
#define DMAF_NOINTR	0x04
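/*
 * DMAF_PCFLUSH and DMAF_VCFLUSH are set in dmago() when a read DMA
 * (i.e. a transfer that writes memory) will require a physical or
 * virtual cache flush once it completes; DMAF_NOINTR is set when the
 * caller passed DMAGO_NOINT and the completion interrupt on the last
 * chain segment can be suppressed.
 */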
static int	dmamatch(device_t, cfdata_t, void *);
static void	dmaattach(device_t, device_t, void *);

CFATTACH_DECL_NEW(dma, sizeof(struct dma_softc),
    dmamatch, dmaattach, NULL, NULL);

static int	dmaintr(void *);
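/*
 * The CFATTACH_DECL_NEW() above is the autoconf glue: it registers
 * dmamatch() and dmaattach() and tells autoconf how much softc storage
 * to allocate; the two trailing NULLs are the (unused) detach and
 * activate entry points.
 */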
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define DDB_FOLLOW	0x04
#define DDB_IO		0x08
static void	dmatimeout(void *);
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
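/*
 * The statistics above: dmatimo[] counts watchdog scans while a channel
 * is armed (see dmatimeout()), dmahits[]/dmamisses[] count how often
 * adjacent pages could or could not be merged into one chain segment in
 * dmago(), and dmabyte[]/dmaword[]/dmalword[] count requests by transfer
 * width.
 */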
static struct dma_softc *dma_softc;
static int
dmamatch(device_t parent, cfdata_t cf, void *aux)
{
	struct intio_attach_args *ia = aux;
	static int dmafound = 0;	/* can only have one */

	if (strcmp("dma", ia->ia_modname) != 0 || dmafound)
		return 0;

	dmafound = 1;
	return 1;
}
static void
dmaattach(device_t parent, device_t self, void *aux)
{
	struct dma_softc *sc = device_private(self);
	struct intio_attach_args *ia = aux;
	struct dma_channel *dc;
	struct dmareg *dma;
	int i;
	char rev;

	/* There's just one. */
	dma_softc = sc;

	sc->sc_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bst, ia->ia_iobase, INTIO_DEVSIZE, 0,
	    &sc->sc_bsh)) {
		aprint_error(": can't map registers\n");
		return;
	}

	dma = bus_space_vaddr(sc->sc_bst, sc->sc_bsh);
	sc->sc_dmareg = dma;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (hp300_bus_space_probe(sc->sc_bst, sc->sc_bsh, DMA_ID2, 1) == 0) {
		rev = 'B';
#if !defined(HP320)
		panic("%s: DMA card requires hp320 support", __func__);
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);
	callout_init(&sc->sc_debug_ch, 0);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("%s: more than 2 channels?", __func__);
			/* NOTREACHED */
		}
	}

	/* make sure timeout is really not needed */
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);

	aprint_normal(": 98620%c, 2 channels, %d-bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}
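/*
 * Note that no interrupt handler is established in dmaattach();
 * dmacomputeipl() below does that (and may redo it) once a DMA-using
 * controller has attached and the required interrupt level is known.
 */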
/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl(void)
{
	struct dma_softc *sc = dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(ipl2psl_table[IPL_VM]);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_VM);
}
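/*
 * sc_ipl is the raw m68k interrupt priority corresponding to IPL_VM;
 * besides being used for intr_establish() above, it is encoded into the
 * channel command word via DMA_IPL() in dmago() so that the 98620
 * interrupts at the same level as the handler.
 */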
int
dmareq(struct dmaqueue *dq)
{
	struct dma_softc *sc = dma_softc;
	int i, chan, s;

	s = splhigh();	/* XXXthorpej */

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return 1;
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return 0;
}
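/*
 * A return of 1 means the caller owns the channel now (dq->dq_chan has
 * been rewritten to the channel number); 0 means the request was queued
 * and the caller's dq_start callback will run from dmafree() once one of
 * the requested channels becomes free.  A minimal, purely illustrative
 * initiator (the foo_* names are hypothetical) would do roughly:
 *
 *	sc->sc_dq.dq_softc = sc;
 *	sc->sc_dq.dq_start = foo_dmastart;
 *	sc->sc_dq.dq_done  = foo_dmadone;
 *	sc->sc_dq.dq_chan  = (1 << 0) | (1 << 1);   (either channel is fine)
 *	if (dmareq(&sc->sc_dq))
 *		foo_dmastart(sc);                   (granted immediately)
 */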
void
dmafree(struct dmaqueue *dq)
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

	s = splhigh();	/* XXXthorpej */

	dmatimo[unit] = 0;
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = TAILQ_FIRST(&sc->sc_queue); dn != NULL;
	    dn = TAILQ_NEXT(dn, dq_list)) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}
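/*
 * Queued jobs are started in FIFO order (the queue is scanned from the
 * head), and the freed channel number is handed over by rewriting
 * dn->dq_chan before dq_start() is called.
 */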
void
dmago(int unit, char *addr, int count, int flags)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;

	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = PAGE_SIZE - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
		    dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		    ) {
			dmahits[unit]++;
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
			dmamisses[unit]++;
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;

	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}

	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf(" %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;

	/* Load the first chain segment and start the transfer. */
	DMA_ARM(sc, dc);
}
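/*
 * Example of the compaction above: a 16KB transfer starting on a page
 * boundary and backed by four physically contiguous 4KB pages collapses
 * into a single 16KB chain segment, while the worst case (no two pages
 * adjacent) needs four segments.  On a 98620B the merge is additionally
 * refused once a segment's count would exceed the 16-bit limit.
 */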
void
dmastop(int unit)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}
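/*
 * Since dmastop() is normally reached from dmaintr(), the dq_done
 * callback runs in interrupt context.  The dm_job test above covers
 * completions that race with a dmafree() already issued by the
 * initiator.
 */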
static int
dmaintr(void *arg)
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;

		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x "
				    "next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);

		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
			dmatimo[i] = 1;

			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return found;
}
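/*
 * Note the interaction with DMAF_NOINTR set up in dmago(): only the
 * completion interrupt of the very last segment is suppressed, so the
 * per-segment reloads in dmaintr() still interrupt as usual.
 */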
static void
dmatimeout(void *arg)
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i] - 1);
			dmatimo[i]++;
		}
		splx(s);
	}
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
}