/*	$NetBSD: dma.c,v 1.41 2008/06/15 07:15:30 tsutsui Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.41 2008/06/15 07:15:30 tsutsui Exp $");

#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <m68k/cacheops.h>

#include <hp300/dev/intiovar.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/PAGE_SIZE+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/PAGE_SIZE) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/PAGE_SIZE+1)
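
/*
 * Worked example (assuming the common values MAXPHYS = 64KB and
 * PAGE_SIZE = 4KB): 64KB / 4KB + 1 = 17 chain elements at most.
 */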

struct dma_chain {
	int	dc_count;
	char	*dc_addr;
};

struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;

	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	struct	callout sc_debug_ch;
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
};

/* types */
#define	DMA_B	0
#define	DMA_C	1

/* flags (meanings summarized from their use below) */
#define	DMAF_PCFLUSH	0x01	/* flush physical cache when DMA completes */
#define	DMAF_VCFLUSH	0x02	/* flush virtual cache when DMA completes */
#define	DMAF_NOINTR	0x04	/* skip completion interrupt on last segment */

static int	dmamatch(device_t, cfdata_t, void *);
static void	dmaattach(device_t, device_t, void *);

CFATTACH_DECL_NEW(dma, sizeof(struct dma_softc),
    dmamatch, dmaattach, NULL, NULL);

static int	dmaintr(void *);

#ifdef DEBUG
int	dmadebug = 0;
#define	DDB_WORD	0x01	/* same as DMAGO_WORD */
#define	DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define	DDB_IO		0x08

static void	dmatimeout(void *);
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

static struct dma_softc *dma_softc;

static int
dmamatch(device_t parent, cfdata_t cf, void *aux)
{
	struct intio_attach_args *ia = aux;
	static int dmafound = 0;	/* can only have one */

	if (strcmp("dma", ia->ia_modname) != 0 || dmafound)
		return 0;

	dmafound = 1;
	return 1;
}

static void
dmaattach(device_t parent, device_t self, void *aux)
{
	struct dma_softc *sc = device_private(self);
	struct intio_attach_args *ia = aux;
	struct dma_channel *dc;
	struct dmareg *dma;
	int i;
	char rev;

	sc->sc_dev = self;

	/* There's just one. */
	dma_softc = sc;

	sc->sc_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bst, ia->ia_iobase, INTIO_DEVSIZE, 0,
	    &sc->sc_bsh)) {
		aprint_error(": can't map registers\n");
		return;
	}

	dma = bus_space_vaddr(sc->sc_bst, sc->sc_bsh);
	sc->sc_dmareg = dma;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (hp300_bus_space_probe(sc->sc_bst, sc->sc_bsh, DMA_ID2, 1) == 0) {
		rev = 'B';
#if !defined(HP320)
		aprint_normal("\n");
		panic("%s: DMA card requires hp320 support", __func__);
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);
	callout_init(&sc->sc_debug_ch, 0);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			aprint_normal("\n");
			panic("%s: more than 2 channels?", __func__);
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
#endif

	aprint_normal(": 98620%c, 2 channels, %d-bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl(void)
{
	struct dma_softc *sc = dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(ipl2psl_table[IPL_VM]);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_VM);
}
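
/*
 * Note (inferred from the "defer hooking up" comment in dmaattach()):
 * DMA-using controllers are expected to call dmacomputeipl() after
 * establishing their own interrupts, so the handler above is always
 * (re)established at an ipl at least as high as its splbio() callers.
 */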

int
dmareq(struct dmaqueue *dq)
{
	struct dma_softc *sc = dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return 1;
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return 0;
}
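
/*
 * Summary of the convention above: on entry dq->dq_chan is a bitmask
 * of acceptable channels, e.g. (1 << 0) | (1 << 1) to accept either
 * channel; on a successful grant it is overwritten with the number of
 * the granted channel, which is the form dmago() and dmafree() expect.
 */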

void
dmafree(struct dmaqueue *dq)
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = TAILQ_FIRST(&sc->sc_queue); dn != NULL;
	    dn = TAILQ_NEXT(dn, dq_list)) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}
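
#if 0
/*
 * Hypothetical usage sketch (not compiled): how a controller driver
 * might drive the dmareq()/dmago()/dmafree() interface, inferred only
 * from the calls made in this file.  The "example_*" names are
 * illustrative, not a real API.
 */
struct example_softc {
	struct dmaqueue sc_dq;		/* request handed to dmareq() */
	char *sc_addr;			/* kernel VA of the buffer */
	int sc_count;			/* transfer length in bytes */
};

static void
example_start(void *arg)
{
	struct example_softc *es = arg;

	/* dq_chan now holds the granted channel number. */
	dmago(es->sc_dq.dq_chan, es->sc_addr, es->sc_count, DMAGO_READ);
}

static void
example_done(void *arg)
{
	struct example_softc *es = arg;

	/* Called via dq_done from dmastop(); release the channel. */
	dmafree(&es->sc_dq);
}

static void
example_io(struct example_softc *es, char *kva, int count)
{

	es->sc_addr = kva;
	es->sc_count = count;
	es->sc_dq.dq_softc = es;
	es->sc_dq.dq_start = example_start;
	es->sc_dq.dq_done = example_done;
	es->sc_dq.dq_chan = (1 << 0) | (1 << 1);  /* either channel */
	if (dmareq(&es->sc_dq))
		example_start(es);	/* channel granted immediately */
	/* Otherwise example_start() fires later, from dmafree(). */
}
#endif	/* usage sketch */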

void
dmago(int unit, char *addr, int count, int flags)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = PAGE_SIZE - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
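
	/*
	 * Worked example of the compaction above (hypothetical numbers,
	 * byte-mode DMA so tcount is not shifted): a page-aligned,
	 * three-page buffer whose first two pages are physically
	 * adjacent collapses into two chain elements:
	 *
	 *	seg 0: pa of page 0, count 2*PAGE_SIZE	(pages 0+1 merged)
	 *	seg 1: pa of page 2, count PAGE_SIZE	(discontiguous)
	 *
	 * "dmaend" always tracks the physical address just past the
	 * previous element, which is what the test above compares.
	 */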
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf("  %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(sc, dc);
}

void
dmastop(int unit)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

static int
dmaintr(void *arg)
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x "
				    "next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return found;
}

#ifdef DEBUG
static void
dmatimeout(void *arg)
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
}
#endif