Expand PMF_FN_* macros.
[netbsd-mini2440.git] / sys / dev / marvell / gtidma.c
blob68b867cedb3a5e0667f3c780912b2f8b91f47c56
1 /* $NetBSD: gtidma.c,v 1.17 2009/05/12 12:18:45 cegger Exp $ */
3 /*
4 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
5 * All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed for the NetBSD Project by
18 * Allegro Networks, Inc., and Wasabi Systems, Inc.
19 * 4. The name of Allegro Networks, Inc. may not be used to endorse
20 * or promote products derived from this software without specific prior
21 * written permission.
22 * 5. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
24 * written permission.
26 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
27 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
28 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
29 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
41 * idma.c - GT-63269 IDMA driver
43 * creation Wed Sep 26 23:54:00 PDT 2001 cliff
46 #include <sys/cdefs.h>
47 __KERNEL_RCSID(0, "$NetBSD: gtidma.c,v 1.17 2009/05/12 12:18:45 cegger Exp $");
49 #include "opt_idma.h"
50 #include "opt_ddb.h"
51 #include "opt_allegro.h"
53 #include <sys/param.h>
54 #include <sys/device.h>
55 #include <sys/inttypes.h>
56 #include <sys/callout.h>
57 #include <sys/malloc.h>
59 #include <uvm/uvm_extern.h>
61 #include <machine/psl.h>
62 #include <sys/intr.h>
63 #include <sys/bus.h>
64 #include <machine/autoconf.h>
65 #include <powerpc/atomic.h>
67 #include <dev/marvell/gtreg.h>
68 #include <dev/marvell/gtvar.h>
69 #include <dev/marvell/gtintrreg.h>
70 #include <dev/marvell/idmareg.h>
71 #include <dev/marvell/idmavar.h>
73 #define NULL 0
75 extern int hz;
77 #ifdef DIAGNOSTIC
78 # define DIAGPRF(x) printf x
79 #else
80 # define DIAGPRF(x)
81 #endif
83 #ifdef DEBUG
84 # define STATIC
85 int idmadebug = 0;
86 # define DPRINTF(x) do { if (idmadebug) printf x ; } while (0)
87 # define DPRINTFN(n, x) do { if (idmadebug >= (n)) printf x ; } while (0)
88 #else
89 # define STATIC static
90 # define DPRINTF(x)
91 # define DPRINTFN(n, x)
92 #endif
94 #ifdef DIAGNOSTIC
96 unsigned char idmalock[CACHELINESIZE]
97 __aligned(CACHELINESIZE) = { 0 };
99 #endif
101 #ifdef DEBUG
103 # define IDDP_SANITY(idcp, iddp) do { \
104 vaddr_t base = idcp->idc_desc_mem.idm_map->dm_segs[0].ds_vaddr; \
105 vaddr_t limit = base + idcp->idc_desc_mem.idm_map->dm_segs[0].ds_len; \
106 KASSERT((((unsigned)iddp) & (sizeof(idma_desc_t) - 1)) == 0); \
107 KASSERT((vaddr_t)iddp >= base); \
108 KASSERT((vaddr_t)iddp < limit); \
109 } while (0);
111 #else
113 # define IDDP_SANITY(idcp, iddp)
115 #endif /* DEBUG */
/*
 * IDMA_BURST_SIZE comes from opt_idma.h for now...
 */
/*
 * IDMA_CTLLO_DFLT - default channel control (low) value written to
 * start a transfer: descriptor (chained) mode, burst size from config,
 * interrupt on completion, fetch-next-descriptor, channel enable.
 * NOTE(review): "IDMA_CTLL0_BURSTCODE" is spelled with a digit zero,
 * unlike the other IDMA_CTLLO_* macros -- presumably that is the real
 * name in idmareg.h; verify against the header.
 */
#define IDMA_CTLLO_DFLT (IDMA_CTLL0_BURSTCODE(IDMA_BURST_SIZE) \
			|IDMA_CTLLO_BLKMODE \
			|IDMA_CTLLO_INTR \
			|IDMA_CTLLO_ENB|IDMA_CTLLO_FETCHND|IDMA_CTLLO_CDEN \
			|IDMA_CTLLO_DESCMODE)
/*
 * _mftb - read the 64-bit PowerPC timebase atomically
 *
 * Re-reads TBU around the TBL read and loops until TBU is stable, so
 * the combined 64-bit value is consistent across a TBL carry.
 * "%0+1" names the low word of the 64-bit "tb" register pair
 * (32-bit PowerPC ABI).
 */
static inline u_int64_t
_mftb(void)
{
	u_long scratch;
	u_int64_t tb;

	__asm volatile ("1: mftbu %0; mftb %0+1; mftbu %1; cmpw 0,%0,%1; bne 1b"
			: "=r"(tb), "=r"(scratch));
	return tb;
}
140 #ifndef IDMA_COHERENT
142 * inlines to flush, invalidate cache
143 * required if DMA cache coherency is broken
144 * only 1 cache line is affected, check your size & alignment
147 #define IDMA_CACHE_FLUSH(p) idma_cache_flush(p)
148 #define IDMA_CACHE_INVALIDATE(p) idma_cache_invalidate(p)
149 #define IDMA_LIST_SYNC_PRE(c, p) idma_list_sync_pre(c, p)
150 #define IDMA_LIST_SYNC_POST(c, p) idma_list_sync_post(c, p)
/*
 * idma_cache_flush - write back (and invalidate) the single cache line
 * at `p', which must be CACHELINESIZE-aligned.
 * NOTE(review): the dummy "lwz" reload appears intended to force the
 * dcbf to complete before the final sync -- confirm against CPU errata.
 */
static inline void
idma_cache_flush(void *p)
{
	KASSERT(((unsigned int)p & (CACHELINESIZE-1)) == 0);
	__asm volatile ("eieio; dcbf 0,%0; eieio; lwz %0,0(%0); sync;"
			: "+r"(p):);
}
/*
 * idma_cache_invalidate - invalidate the single cache line at `p',
 * which must be CACHELINESIZE-aligned.  Used after the IDMA engine has
 * written descriptor memory behind the cache's back.
 */
static inline void
idma_cache_invalidate(void *const p)
{
	KASSERT(((unsigned int)p & (CACHELINESIZE-1)) == 0);
	__asm volatile ("eieio; dcbi 0,%0; sync;" :: "r"(p));
}
/*
 * idma_list_sync_pre - flush every descriptor in the handle list to
 * memory before handing the list to the IDMA engine (non-coherent
 * DMA: the engine fetches descriptors directly from RAM).
 */
static inline void
idma_list_sync_pre(idma_chan_t * const idcp, idma_desch_t * const iddhp)
{
	idma_desch_t *iddhp_tmp;
	idma_desc_t *iddp;

	for(iddhp_tmp = iddhp; iddhp_tmp != 0; iddhp_tmp = iddhp_tmp->idh_next){
		iddp = iddhp_tmp->idh_desc_va;
		DPRINTFN(2, ("idma_list_sync_pre: "
			"{ 0x%x, 0x%x, 0x%x, 0x%x }\n",
			bswap32(iddp->idd_ctl),
			bswap32(iddp->idd_src_addr),
			bswap32(iddp->idd_dst_addr),
			bswap32(iddp->idd_next)));
		IDDP_SANITY(idcp, iddp);
		IDMA_CACHE_FLUSH(iddhp_tmp->idh_desc_va);
	}
}
/*
 * idma_list_sync_post - (non-coherent variant) invalidate each
 * descriptor's cache line, then OR together the control words of the
 * whole list.  Returns the accumulated OWN/TERM bits: OWN set means
 * some descriptor is still owned by the engine (transfer pending);
 * TERM set means some descriptor was terminated.
 */
static inline u_int32_t
idma_list_sync_post(idma_chan_t * const idcp, idma_desch_t *iddhp)
{
	idma_desc_t *iddp;
	u_int32_t rv = 0;

	do {
		iddp = iddhp->idh_desc_va;
		IDMA_CACHE_INVALIDATE((void *)iddp);
		IDDP_SANITY(idcp, iddp);
		rv |= idma_desc_read(&iddp->idd_ctl);
	} while ((iddhp = iddhp->idh_next) != 0);

	rv &= (IDMA_DESC_CTL_OWN|IDMA_DESC_CTL_TERM);

	return rv;
}
204 #else /* IDMA_COHERENT */
206 #define IDMA_CACHE_FLUSH(p)
207 #define IDMA_CACHE_INVALIDATE(p)
208 #define IDMA_LIST_SYNC_PRE(c, p)
209 #define IDMA_LIST_SYNC_POST(c, p) idma_list_sync_post(c, p)
/*
 * idma_list_sync_post - (coherent variant) OR together the control
 * words of the whole descriptor list; no cache maintenance needed.
 * Returns the accumulated OWN/TERM status bits.
 */
static inline u_int32_t
idma_list_sync_post(idma_chan_t * const idcp, idma_desch_t *iddhp)
{
	idma_desc_t *iddp;
	u_int32_t rv = 0;

	do {
		iddp = iddhp->idh_desc_va;
		IDDP_SANITY(idcp, iddp);
		rv |= idma_desc_read(&iddp->idd_ctl);
	} while ((iddhp = iddhp->idh_next) != 0);

	rv &= (IDMA_DESC_CTL_OWN|IDMA_DESC_CTL_TERM);

	return rv;
}
228 #endif /* IDMA_COHERENT */
231 STATIC void idma_attach (device_t, device_t, void *);
232 STATIC int idma_match (device_t, cfdata_t, void *);
233 STATIC void idma_chan_init
234 (idma_softc_t *, idma_chan_t *, unsigned int);
235 STATIC void idma_arb_init(idma_softc_t *);
236 STATIC void idma_dmamem_free(idma_softc_t *, idma_dmamem_t *);
237 STATIC int idma_dmamem_alloc
238 (idma_softc_t *, idma_dmamem_t *, int, size_t sz);
239 STATIC void idma_qstart
240 (idma_softc_t *, idma_chan_t *, unsigned int);
241 STATIC void idma_start_subr
242 (idma_softc_t *, idma_chan_t *, unsigned int, idma_desch_t *);
243 STATIC void idma_retry
244 (idma_softc_t *, idma_chan_t *, const u_int, idma_desch_t *);
245 STATIC void idma_done
246 (idma_softc_t *, idma_chan_t *, const u_int, idma_desch_t *, u_int32_t);
247 STATIC int idma_intr0_1 (void *);
248 STATIC int idma_intr2_3 (void *);
249 STATIC int idma_intr4_5 (void *);
250 STATIC int idma_intr6_7 (void *);
251 STATIC int idma_intr_comm
252 (idma_softc_t *, unsigned int, unsigned int, unsigned int, u_int32_t, char *);
254 STATIC void idma_print_active
255 (idma_softc_t *, unsigned int, idma_desch_t *);
/* autoconf(9) attachment glue: softc size, match and attach entry points */
struct cfattach idma_ca = {
	sizeof(struct idma_softc), idma_match, idma_attach
};
262 extern struct cfdriver idma_cd;
264 idma_softc_t *idma_sc = 0;
266 STATIC int
267 idma_match(
268 device_t const parent,
269 cfdata_t const self,
270 void *const aux)
272 struct gt_attach_args * const ga = (struct gt_attach_args *)aux;
274 if (strcmp(ga->ga_name, idma_cd.cd_name) != 0)
275 return 0;
277 return 1;
280 STATIC void
281 idma_attach(
282 device_t const parent,
283 device_t const self,
284 void *const aux)
286 struct gt_softc * const gtsc = device_private(parent);
287 idma_softc_t * const sc = device_private(self);
288 struct gt_attach_args * const ga = aux;
289 unsigned int i;
290 void *ih;
291 const char *fmt = "%s: couldn't establish irq %d\n";
293 idma_sc = sc;
294 sc->idma_gt = gtsc;
295 sc->idma_bustag = ga->ga_memt; /* XXX */
296 sc->idma_dmatag = ga->ga_dmat;
297 sc->idma_bushandle = 0; /* XXX */
298 sc->idma_reg_base = IDMA_CNT_REG_BASE;
299 sc->idma_reg_size = 0x100;
300 sc->idma_ien = 0;
301 sc->idma_callout_state = 0;
302 callout_init(&sc->idma_callout, 0);
304 for (i=0; i < NIDMA_CHANS; i++)
305 idma_chan_init(sc, &sc->idma_chan[i], i);
307 idma_arb_init(sc);
308 printf("\n");
310 ih = intr_establish(IRQ_IDMA0_1, IST_LEVEL, IPL_IDMA, idma_intr0_1, sc);
311 if (ih == NULL) {
312 printf(fmt, IRQ_IDMA0_1);
313 return;
315 sc->idma_ih[0] = ih;
317 ih = intr_establish(IRQ_IDMA2_3, IST_LEVEL, IPL_IDMA, idma_intr2_3, sc);
318 if (ih == NULL) {
319 printf(fmt, IRQ_IDMA2_3);
320 return;
322 sc->idma_ih[1] = ih;
324 ih = intr_establish(IRQ_IDMA4_5, IST_LEVEL, IPL_IDMA, idma_intr4_5, sc);
325 if (ih == NULL) {
326 printf(fmt, IRQ_IDMA4_5);
327 return;
329 sc->idma_ih[2] = ih;
331 ih = intr_establish(IRQ_IDMA6_7, IST_LEVEL, IPL_IDMA, idma_intr6_7, sc);
332 if (ih == NULL) {
333 printf(fmt, IRQ_IDMA6_7);
334 return;
336 sc->idma_ih[3] = ih;
339 printf("%s: irpt at irqs %d, %d, %d, %d\n", device_xname(&sc->idma_dev),
340 IRQ_IDMA0_1, IRQ_IDMA2_3, IRQ_IDMA4_5, IRQ_IDMA6_7);
341 #ifdef IDMA_ABORT_TEST
342 printf("%s: CAUTION: IDMA_ABORT_TEST enabled\n",
343 device_xname(&sc->idma_dev));
344 #endif
349 * idma_chan_init - init soft channel state && disable the channel
351 STATIC void
352 idma_chan_init(
353 idma_softc_t * const sc,
354 idma_chan_t * const idcp,
355 const unsigned int chan)
357 u_int32_t r;
358 unsigned int s;
360 DPRINTF(("idma_chan_init %d\n", chan));
361 s = splidma();
363 memset(idcp, 0, sizeof(idma_chan_t));
364 idcp->idc_state = IDC_FREE;
365 idcp->idc_sc = sc;
366 idcp->idc_chan = chan;
367 idcp->idc_done_count = 0;
368 idcp->idc_abort_count = 0;
370 r = 0;
371 gt_write(&sc->idma_gt->gt_dev, IDMA_CTLHI_REG(chan), r);
372 DPRINTFN(2, ("idma_chan_init: 0x%x <-- 0x%x\n",
373 IDMA_CTLHI_REG(chan), r));
374 gt_write(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan), r);
375 DPRINTFN(2, ("idma_chan_init: 0x%x <-- 0x%x\n",
376 IDMA_CTLLO_REG(chan), r));
378 splx(s);
382 * idma_arb_init - configure the IDMA arbitor
384 STATIC void
385 idma_arb_init(idma_softc_t * const sc)
387 u_int32_t r;
388 unsigned int s;
390 DPRINTF(("idma_arb_init %p\n", sc));
391 s = splidma();
394 * all channels arbitrate equaly
396 r = 0x32103210;
397 gt_write(&sc->idma_gt->gt_dev, IDMA_ARB_REG(0), r);
398 DPRINTFN(2, ("idma_arb_init: 0x%x <-- 0x%x\n", IDMA_ARB_REG(0), r));
399 gt_write(&sc->idma_gt->gt_dev, IDMA_ARB_REG(1), r);
400 DPRINTFN(2, ("idma_arb_init: 0x%x <-- 0x%x\n", IDMA_ARB_REG(1), r));
403 * enable cross bar timeout, w/ max timeout value
405 r = 0x000100ff;
406 gt_write(&sc->idma_gt->gt_dev, IDMA_XTO_REG(0), r);
407 DPRINTFN(2, ("idma_arb_init: 0x%x <-- 0x%x\n", IDMA_XTO_REG(0), r));
408 gt_write(&sc->idma_gt->gt_dev, IDMA_XTO_REG(1), r);
409 DPRINTFN(2, ("idma_arb_init: 0x%x <-- 0x%x\n", IDMA_XTO_REG(1), r));
411 splx(s);
/*
 * idma_chan_free - release a channel obtained from idma_chan_alloc
 *
 * Disables the channel's interrupts, frees its descriptor DMA memory
 * and handle array, then re-initializes the channel (back to IDC_FREE,
 * hardware disabled).  Caller must be at splidma or above.
 */
void
idma_chan_free(idma_chan_t * const idcp)
{
	idma_softc_t *sc = idcp->idc_sc;
	unsigned int chan = idcp->idc_chan;

	DPRINTF(("idma_chan_free %d\n", chan));
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(sc == idma_sc);
	KASSERT(chan < NIDMA_CHANS);
	KASSERT(idcp->idc_state != IDC_FREE);

	idma_intr_dis(idcp);
	idma_dmamem_free(sc, &idcp->idc_desc_mem);
	free(idcp->idc_desch, M_DEVBUF);
	idma_chan_init(sc, idcp, chan);	/* back to IDC_FREE, channel disabled */
}
432 idma_chan_t *
433 idma_chan_alloc(
434 const unsigned int ndesc,
435 int (* const callback)(void *, idma_desch_t *, u_int32_t),
436 void *const arg)
438 idma_softc_t * const sc = idma_sc; /* XXX */
439 idma_chan_t *idcp;
440 idma_desch_t *iddhp;
441 idma_desch_t *iddhp_next;
442 idma_desc_t *iddp_va;
443 idma_desc_t *iddp_pa;
444 u_int32_t r;
445 size_t sz;
446 int err;
447 int i;
448 unsigned int s;
449 STATIC void idma_time(void *);
451 DPRINTF(("idma_chan_alloc %d %p %p\n", ndesc, callback, arg));
452 KASSERT(ndesc >= 0);
454 idcp = 0;
455 s = splidma();
456 for (i=0; i < NIDMA_CHANS; i++) {
457 if (sc->idma_chan[i].idc_state == IDC_FREE) {
458 idcp = &sc->idma_chan[i];
459 idcp->idc_state = IDC_ALLOC;
460 break;
463 splx(s);
464 if (idcp == 0)
465 return idcp;
466 KASSERT(idcp->idc_sc == sc);
469 * allocate descriptor handles
471 sz = ndesc * sizeof(idma_desch_t);
472 iddhp = (idma_desch_t *)malloc(sz, M_DEVBUF, M_NOWAIT);
473 idcp->idc_desch = iddhp;
474 if (iddhp == 0) {
475 DIAGPRF(("idma_chan_alloc: cannot malloc 0x%x\n", sz));
476 idma_chan_init(sc, idcp, idcp->idc_chan);
477 return 0;
481 * allocate descriptors
483 sz = ndesc * sizeof(idma_desc_t);
484 err = idma_dmamem_alloc(sc, &idcp->idc_desc_mem, 1, sz);
485 if (err) {
486 DIAGPRF(("idma_chan_alloc: cannot idma_dmamem_alloc 0x%x\n",
487 sz));
488 idma_chan_free(idcp);
489 return 0;
493 * clear descriptors (sanity)
494 * initialize descriptor handles
495 * link descriptors to descriptor handles
496 * link the descriptors in a circular chain using phys addr
497 * link the descriptor handles in a circular chain
499 iddp_va = (idma_desc_t *)
500 idcp->idc_desc_mem.idm_map->dm_segs[0].ds_vaddr;
501 iddp_pa = (idma_desc_t *)
502 idcp->idc_desc_mem.idm_map->dm_segs[0].ds_addr;
503 KASSERT((((unsigned)iddp_va) & (sizeof(idma_desc_t) - 1)) == 0);
504 KASSERT((((unsigned)iddp_pa) & (sizeof(idma_desc_t) - 1)) == 0);
505 DPRINTFN(2, ("idma_chan_alloc: descriptors at %p/%p, handles at %p\n",
506 iddp_va, iddp_pa, idcp->idc_desch));
507 memset(iddp_va, 0, sz);
508 iddhp_next = iddhp + 1;
509 for (i=0; i < ndesc; i++) {
510 iddhp->idh_state = IDH_FREE;
511 iddhp->idh_next = iddhp_next;
512 iddhp->idh_chan = idcp;
513 iddhp->idh_desc_va = iddp_va++;
514 iddhp->idh_desc_pa = iddp_pa++;
515 iddp_va->idd_next = 0;
516 iddhp_next++;
517 iddhp++;
519 --iddhp;
520 --iddp_va;
521 IDDP_SANITY(idcp, iddp_va);
522 iddhp->idh_next = idcp->idc_desch;
523 idcp->idc_desch_free = idcp->idc_desch;
524 iddp_va->idd_next = 0;
527 * configure IDMA channel control hi
529 r = IDMA_CTLHI_SRCPCISWAP_NONE|IDMA_CTLHI_DSTPCISWAP_NONE
530 |IDMA_CTLHI_NXTPCISWAP_NONE;
532 gt_write(&sc->idma_gt->gt_dev, IDMA_CTLHI_REG(idcp->idc_chan), r);
533 DPRINTFN(2, ("idma_chan_alloc: 0x%x <-- 0x%x\n",
534 IDMA_CTLHI_REG(idcp->idc_chan), r));
537 * finish initializing the channel
539 idcp->idc_callback = callback;
540 idcp->idc_arg = arg;
541 idcp->idc_q.idq_depth = 0;
542 SIMPLEQ_INIT(&idcp->idc_q.idq_q);
543 idcp->idc_ndesch = ndesc;
544 idcp->idc_state |= IDC_IDLE;
545 idcp->idc_active = 0;
546 idma_intr_enb(idcp);
548 if (! atomic_exch(&sc->idma_callout_state, 1))
549 callout_reset(&sc->idma_callout, hz, idma_time, sc);
551 return idcp;
554 STATIC void
555 idma_dmamem_free(idma_softc_t * const sc, idma_dmamem_t * const idmp)
557 DPRINTF(("idma_dmamem_free %p %p\n", sc, idmp));
558 if (idmp->idm_map)
559 bus_dmamap_destroy(sc->idma_dmatag, idmp->idm_map);
560 if (idmp->idm_kva)
561 bus_dmamem_unmap(sc->idma_dmatag, idmp->idm_kva,
562 idmp->idm_size);
563 if (idmp->idm_nsegs > 0)
564 bus_dmamem_free(sc->idma_dmatag, idmp->idm_segs,
565 idmp->idm_nsegs);
566 idmp->idm_map = NULL;
567 idmp->idm_kva = NULL;
568 idmp->idm_nsegs = 0;
/*
 * idma_dmamem_alloc - allocate, map, and load a DMA-able region of
 * `sz' bytes in at most `maxsegs' segments (callers pass 1; a single
 * segment is asserted).  On any failure everything acquired so far is
 * released via idma_dmamem_free and the bus_dma error is returned.
 */
STATIC int
idma_dmamem_alloc(
	idma_softc_t * const sc,
	idma_dmamem_t * const idmp,
	const int maxsegs,
	const size_t sz)
{
	int error = 0;

	DPRINTF(("idma_dmamem_alloc %p %p %d %d\n", sc, idmp, maxsegs, sz));
	idmp->idm_size = sz;
	idmp->idm_maxsegs = maxsegs;

	error = bus_dmamem_alloc(sc->idma_dmatag, idmp->idm_size, PAGE_SIZE,
		IDMA_BOUNDARY, idmp->idm_segs, idmp->idm_maxsegs,
		&idmp->idm_nsegs, BUS_DMA_NOWAIT);
	if (error) {
		DPRINTF(("idma_dmamem_alloc: cannot bus_dmamem_alloc\n"));
		goto fail;
	}
	DPRINTFN(2, ("idma_dmamem_alloc: bus_dmamem_alloc ret idm_nsegs %d\n",
		idmp->idm_nsegs));
	KASSERT(idmp->idm_nsegs == 1);

	error = bus_dmamem_map(sc->idma_dmatag, idmp->idm_segs, idmp->idm_nsegs,
		idmp->idm_size, &idmp->idm_kva, BUS_DMA_NOWAIT);
	if (error) {
		DPRINTF(("idma_dmamem_alloc: cannot bus_dmamem_map\n"));
		goto fail;
	}
	KASSERT((((unsigned)(idmp->idm_kva)) & 0x1f) == 0);
	/* enforce CACHELINESIZE alignment */

	error = bus_dmamap_create(sc->idma_dmatag, idmp->idm_size,
		idmp->idm_nsegs, idmp->idm_size, IDMA_BOUNDARY,
		BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &idmp->idm_map);
	if (error) {
		DPRINTF(("idma_dmamem_alloc: cannot bus_dmamap_create\n"));
		goto fail;
	}

	/* on load failure we simply fall through to fail: below */
	error = bus_dmamap_load(sc->idma_dmatag, idmp->idm_map, idmp->idm_kva,
		idmp->idm_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		DPRINTF(("idma_dmamem_alloc: cannot bus_dmamap_load\n"));
	}
#ifdef DEBUG
	if (idmadebug >= 2) {
		int seg;

		for (seg = 0; seg < idmp->idm_map->dm_nsegs; seg++) {
			DPRINTFN(2, ("idma_dmamem_alloc: "
				"seg %d sz %ld va %lx pa %#lx\n",
				seg, idmp->idm_map->dm_segs[seg].ds_len,
				idmp->idm_map->dm_segs[seg].ds_vaddr,
				idmp->idm_map->dm_segs[seg].ds_addr));
		}
	}
#endif

fail:
	if (error) {
		idma_dmamem_free(sc, idmp);
	}
	return error;
}
/*
 * idma_intr_enb - enable IDMA irpts for given chan
 *
 * Adds the channel's interrupt bits to the soft mask copy,
 * acknowledges any stale cause bits for this channel, then writes
 * the new mask.  Caller must be at splidma or above.
 */
void
idma_intr_enb(idma_chan_t * const idcp)
{
	idma_softc_t * const sc = idcp->idc_sc;
	const unsigned int chan = idcp->idc_chan;
	u_int32_t ibits;

	DPRINTF(("idma_intr_enb %p chan %d\n", idcp, chan));
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(sc == idma_sc);
	KASSERT(chan < NIDMA_CHANS);

	ibits = IDMA_MASK(chan, IDMA_INTR_BITS);
	sc->idma_ien |= ibits;

	/*
	 * clear existing irpts for chan
	 * NOTE(review): writes idma_ien with this channel's bits cleared
	 * to the cause register -- presumably zero bits acknowledge the
	 * corresponding causes; confirm against the GT datasheet.
	 */
	gt_write(&sc->idma_gt->gt_dev, IDMA_CAUSE_REG(chan),
		(sc->idma_ien & ~ibits));
	DPRINTFN(2, ("idma_intr_enb: 0x%x <-- 0x%x\n", IDMA_CAUSE_REG(chan),
		(sc->idma_ien & ~ibits)));

	/*
	 * set new mask
	 */
	gt_write(&sc->idma_gt->gt_dev, IDMA_MASK_REG(chan), sc->idma_ien);
	DPRINTFN(2, ("idma_intr_enb: 0x%x <-- 0x%x\n", IDMA_MASK_REG(chan),
		sc->idma_ien));
}
/*
 * idma_intr_dis - disable IDMA irpts for given chan
 *
 * Clears the channel's interrupt bits in the soft mask copy and
 * writes the new mask.  Caller must be at splidma or above.
 * NOTE(review): the shift is computed by hand here while idma_intr_enb
 * uses the IDMA_MASK() macro -- verify both select the same bits.
 */
void
idma_intr_dis(idma_chan_t *idcp)
{
	idma_softc_t * const sc = idcp->idc_sc;
	const unsigned int chan = idcp->idc_chan;
	unsigned int shift;

	DPRINTF(("idma_intr_dis %p chan %d\n", idcp, chan));
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(sc == idma_sc);
	KASSERT(chan < NIDMA_CHANS);

	/* channels 0-3 and 4-7 share mask registers; fold chan into 0-3 */
	shift = IDMA_INTR_SHIFT * ((chan < 4) ? chan : (chan - 4));
	sc->idma_ien &= ~(IDMA_INTR_BITS << shift);

	/*
	 * set new mask
	 */
	gt_write(&sc->idma_gt->gt_dev, IDMA_MASK_REG(chan), sc->idma_ien);
	DPRINTFN(2, ("idma_intr_dis: 0x%x <-- 0x%x\n", IDMA_MASK_REG(chan),
		sc->idma_ien));
}
702 * idma_desch_free - free the descriptor handle
704 void
705 idma_desch_free(idma_desch_t * const iddhp)
707 idma_desch_t *iddhp_next;
708 idma_chan_t *idcp = iddhp->idh_chan;
709 #ifdef DEBUG
710 idma_desc_t *iddp;
711 #endif
713 DPRINTFN(2, ("idma_desch_free %p\n", iddhp));
714 KASSERT(cpl >= IPL_IDMA);
715 KASSERT(iddhp->idh_state != IDH_FREE);
716 KASSERT(iddhp->idh_state != IDH_QWAIT);
717 KASSERT(iddhp->idh_state != IDH_PENDING);
718 KASSERT(iddhp != 0);
719 if (iddhp == 0)
720 return;
722 #ifdef DEBUG
723 iddp = iddhp->idh_desc_va;
724 KASSERT(iddp->idd_next == 0); /* use idma_desch_list_free */
725 idma_desc_write(&iddp->idd_next, 0);
726 #endif
728 iddhp_next = iddhp + 1;
729 if (iddhp_next >= &idcp->idc_desch[ idcp->idc_ndesch ])
730 iddhp_next = &idcp->idc_desch[ 0 ];
731 iddhp->idh_next = iddhp_next;
732 iddhp->idh_aux = 0;
733 iddhp->idh_state = IDH_FREE;
737 * idma_desch_alloc - allocate the next free descriptor handle in the chain
739 idma_desch_t *
740 idma_desch_alloc(idma_chan_t * const idcp)
742 idma_desch_t *iddhp;
744 DPRINTFN(2, ("idma_desch_alloc %p\n", idcp));
745 KASSERT(cpl >= IPL_IDMA);
747 iddhp = idcp->idc_desch_free;
748 DPRINTFN(2, ("idma_desch_alloc: "
749 "idc_desch_free %p iddhp %p idh_state %d\n",
750 idcp->idc_desch_free, iddhp, iddhp->idh_state));
751 if (iddhp->idh_state != IDH_FREE)
752 return 0;
754 KASSERT(iddhp->idh_next != 0);
755 idcp->idc_desch_free = iddhp->idh_next;
756 iddhp->idh_next = 0;
757 iddhp->idh_state = IDH_ALLOC;
759 return iddhp;
763 * idma_desch_list_free - free the descriptor handle list
765 void
766 idma_desch_list_free(idma_desch_t * iddhp)
768 idma_desch_t *iddhp_tail;
769 idma_chan_t * const idcp = iddhp->idh_chan;
771 DPRINTFN(2, ("idma_desch_list_free %p\n", iddhp));
772 KASSERT(cpl >= IPL_IDMA);
773 KASSERT(iddhp != 0);
774 if (iddhp == 0)
775 return;
777 do {
778 idma_desc_write(&iddhp->idh_desc_va->idd_next, 0);
779 iddhp->idh_aux = 0;
780 iddhp->idh_state = IDH_FREE;
781 iddhp_tail = iddhp;
782 iddhp = iddhp->idh_next;
783 DPRINTFN(2, ("idma_desch_list_free: next iddhp %p\n", iddhp));
784 KASSERT((iddhp == 0) || (iddhp == (iddhp_tail + 1))
785 || ((iddhp_tail == &idcp->idc_desch[idcp->idc_ndesch-1])
786 && (iddhp == &idcp->idc_desch[0])));
787 } while (iddhp);
789 iddhp = iddhp_tail + 1;
790 if (iddhp >= &idcp->idc_desch[ idcp->idc_ndesch ])
791 iddhp = &idcp->idc_desch[ 0 ];
792 iddhp_tail->idh_next = iddhp;
/*
 * idma_desch_list_alloc - allocate `n' linked descriptor handles
 *
 * Takes the next `n' handles from the channel's free ring, chaining
 * the underlying descriptors by physical address as it goes.  Returns
 * the list head, or 0 when `n' is 0 or fewer than `n' handles are
 * free (any partially-built list is returned to the ring).  Caller
 * must be at splidma or above.
 */
idma_desch_t *
idma_desch_list_alloc(idma_chan_t * const idcp, unsigned int n)
{
	idma_desch_t *iddhp_head;
	idma_desch_t *iddhp_tail;
	idma_desch_t *iddhp;
	idma_desc_t *iddp_prev = 0;

	DPRINTFN(2, ("idma_desch_list_alloc %p %d\n", idcp, n));
	KASSERT(cpl >= IPL_IDMA);
	if (n == 0)
		return 0;

	iddhp_head = iddhp_tail = iddhp = idcp->idc_desch_free;
	KASSERT(iddhp_head != 0);
	do {
		if (iddhp->idh_state != IDH_FREE) {
			/* ran out of free handles: undo and bail */
			DPRINTFN(2, ("idma_desch_list_alloc: "
				"n %d iddhp %p idh_state %d, bail\n",
				n, iddhp, iddhp->idh_state));
			iddhp_tail->idh_next = 0;
			idma_desch_list_free(iddhp_head);
			return 0;
		}
		iddhp->idh_state = IDH_ALLOC;

		/* chain the previous descriptor to this one (phys addr) */
		if (iddp_prev != 0)
			idma_desc_write(&iddp_prev->idd_next,
				(u_int32_t)iddhp->idh_desc_pa);
		iddp_prev = iddhp->idh_desc_va;

		iddhp_tail = iddhp;
		iddhp = iddhp->idh_next;
		KASSERT(iddhp != 0);
		DPRINTFN(2, ("idma_desch_list_alloc: iddhp %p iddhp_tail %p\n",
			iddhp, iddhp_tail));
		/* free-ring order invariant: next handle adjacent or wraps */
		KASSERT((iddhp == (iddhp_tail + 1))
			|| ((iddhp_tail == &idcp->idc_desch[idcp->idc_ndesch-1])
			&& (iddhp == &idcp->idc_desch[0])));
	} while (--n);

	/* terminate both chains; advance the channel's free pointer */
	idma_desc_write(&iddp_prev->idd_next, 0);
	iddhp_tail->idh_next = 0;
	idcp->idc_desch_free = iddhp;

	return iddhp_head;
}
#if defined(DEBUG)
/*
 * idma_intr_check - debug dump of interrupt plumbing for a channel
 *
 * Prints the GT main interrupt cause/mask bits, the software
 * ipending/imen state, and the IDMA cause/mask registers, to help
 * diagnose a completed transfer that raised no interrupt.
 */
STATIC void
idma_intr_check(idma_softc_t *sc, u_int chan)
{
	extern volatile imask_t ipending;
	extern volatile imask_t imen;
	extern unsigned int gtbase;	/* NOTE(review): appears unused here */
	u_int reg;
	u_int irq = (chan >> 1) + 4;	/* channel pairs map to IRQs 4..7 */
	u_int32_t r;
	u_int32_t irqbit = 1 << irq;
	u_int mask;

	printf("chan %d IRQ %d, ", chan, irq);

	/* main interrupt cause -- NOTE(review): magic GT offset 0xc18 */
	reg = 0xc18;
	r = gt_read(&sc->idma_gt->gt_dev, reg);
	r &= irqbit;
	printf("MIC %s, ", (r == 0) ? "clr" : "set");

	/* CPU interrupt mask -- NOTE(review): magic GT offset 0xc1c */
	reg = 0xc1c;
	r = gt_read(&sc->idma_gt->gt_dev, reg);
	r &= irqbit;
	printf("CIM %s, ", (r == 0) ? "clr" : "set");

	r = ipending[IMASK_ICU_LO];
	r &= irqbit;
	printf("ipending %s, ", (r == 0) ? "clr" : "set");

	r = imen[IMASK_ICU_LO];
	r &= irqbit;
	printf("imen %s, ", (r == 0) ? "clr" : "set");

	mask = IDMA_MASK(chan, IDMA_MASK_BITS);
	reg = IDMA_CAUSE_REG(chan);
	r = gt_read(&sc->idma_gt->gt_dev, reg);
	r &= mask;
	printf("cause reg %#x mask %#x bits %#x (%#x), ",
		reg, mask, r, r & mask);

	mask = IDMA_MASK(chan, IDMA_MASK_BITS);
	reg = IDMA_MASK_REG(chan);
	r = gt_read(&sc->idma_gt->gt_dev, reg);
	r &= mask;
	printf("mask reg %#x mask %#x bits %#x (%#x)\n",
		reg, mask, r, r & mask);

#if defined(DDB) && 0
	Debugger();
#endif
}
#endif /* DEBUG */
/*
 * idma_abort - abort (or cancel) the active transfer on a channel
 *
 * `str' names the reason for diagnostics.  Checks the descriptor
 * OWN/TERM status and the channel ACTIVE bit to classify the failure;
 * if the transfer in fact completed without an interrupt it is
 * finished via idma_done (unless IDMA_ABORT_CANCEL).  An active
 * channel is told to abort and polled up to 100us; failure to stop
 * is fatal (panic).  Finally the transfer is retried unless
 * IDMA_ABORT_CANCEL was passed.  Caller must be at splidma or above.
 *
 * NOTE(review): iddp, iddhp_tmp and want_abort are declared but not
 * used in the visible body -- presumably leftovers; confirm.
 */
void
idma_abort(idma_desch_t *iddhp, unsigned int flags, const char *str)
{
	idma_desc_t *iddp;
	idma_chan_t * const idcp = iddhp->idh_chan;
	idma_softc_t *sc;
	unsigned int chan;
	u_int32_t sts;
	u_int32_t r;
	unsigned int try;
	idma_desch_t *iddhp_tmp;
	int want_abort;

	sc = idcp->idc_sc;
	KASSERT(sc == idma_sc);
	chan = idcp->idc_chan;

	idcp->idc_abort_count++;

#ifndef IDMA_ABORT_TEST
	DPRINTF(("idma_abort: chan %d, desc %p, reason: \"%s\", count %ld\n",
		chan, iddhp, str, idcp->idc_abort_count));
	DPRINTF(("idma_abort: xfers: %lu, aborts %lu\n",
		idcp->idc_done_count,
		idcp->idc_abort_count));

	KASSERT(cpl >= IPL_IDMA);
	KASSERT(iddhp != 0);

	if (idcp == 0) {
		DIAGPRF(("idma_abort: idh_chan NULL\n"));
		return;
	}
	KASSERT(idcp->idc_callback != 0);
	if (idcp->idc_active != iddhp) {
		DPRINTF(("idma_abort: not pending\n"));
		return;
	}
#endif

	idcp->idc_active = NULL;
	iddhp->idh_state = IDH_ABORT;

	/* gather OWN/TERM status across the whole descriptor list */
	sts = IDMA_LIST_SYNC_POST(idcp, iddhp);
	r = gt_read(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan));
	DPRINTF(("idma_abort: channel %s\n",
		((r & IDMA_CTLLO_ACTIVE) == 0) ? "idle" : "active"));
#ifdef DEBUG
	idma_print_active(sc, chan, iddhp);
#endif
	switch (sts) {
	case 0:
		/* all descriptors completed */
		if ((r & IDMA_CTLLO_ACTIVE) == 0) {
			DIAGPRF(("idma_abort: transfer done, no irpt\n"));
			if ((flags & IDMA_ABORT_CANCEL) == 0) {
#if defined(DEBUG)
				idma_intr_check(sc, chan);
#endif
				idma_done(sc, idcp, chan, iddhp, 1);
			}
			return;
		} else {
			DIAGPRF(("idma_abort: transfer done, hung\n"));
		}
		break;
	case IDMA_DESC_CTL_OWN:
		DIAGPRF(("idma_abort: transfer pending, hung\n"));
		break;
	case IDMA_DESC_CTL_TERM:
		DIAGPRF(("idma_abort: transfer done, terminated, no irpt?\n"));
		break;
	case (IDMA_DESC_CTL_OWN|IDMA_DESC_CTL_TERM):
		DIAGPRF(("idma_abort: transfer pending, terminated, hung\n"));
		break;
	}

	if ((r & IDMA_CTLLO_ACTIVE) != 0) {
		DPRINTF(("idma_abort: channel active, aborting...\n"));

		/* request abort, then poll until ABORT and ACTIVE clear */
		r |= IDMA_CTLLO_ABORT;
		gt_write(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan), r);
		DPRINTFN(2, ("idma_abort: 0x%x <-- 0x%x\n",
			IDMA_CTLLO_REG(chan), r));

		for (try = 0; try < 100; try++) {

			DELAY(1);

			r = gt_read(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan));
			DPRINTFN(2, ("idma_abort: 0x%x --> 0x%x\n",
				IDMA_CTLLO_REG(chan), r));

			if ((r & (IDMA_CTLLO_ABORT|IDMA_CTLLO_ACTIVE)) == 0)
				break;
		}

		DPRINTFN(2, ("idma_abort: tries %d\n", try));

		if (try >= 100)
			panic("%s: idma_abort %p failed\n",
				device_xname(&sc->idma_dev), iddhp);
	}

	if ((flags & IDMA_ABORT_CANCEL) == 0)
		idma_retry(sc, idcp, chan, iddhp);
}
1006 void
1007 idma_qflush(idma_chan_t * const idcp)
1009 idma_desch_t *iddhp;
1011 DPRINTF(("idma_qflush %p\n", idcp));
1012 KASSERT(cpl >= IPL_IDMA);
1014 while ((iddhp = SIMPLEQ_FIRST(&idcp->idc_q.idq_q)) != NULL) {
1015 SIMPLEQ_REMOVE_HEAD(&idcp->idc_q.idq_q, iddhp, idh_q);
1016 KASSERT(iddhp->idh_state == IDH_QWAIT);
1017 iddhp->idh_state = IDH_CANCEL;
1020 idcp->idc_q.idq_depth = 0;
/*
 * idma_start - queue a descriptor-handle list for transfer
 *
 * Marks every handle IDH_QWAIT and sets the OWN bit in each
 * descriptor, then appends the list to the channel's wait queue and
 * starts it immediately if the channel is idle.  Returns 1.
 * NOTE(review): the return-type line fell outside this view; `int'
 * is assumed since the function returns 1 unconditionally.
 */
int
idma_start(idma_desch_t * const iddhp)
{
	u_int32_t ctl;
	idma_desch_t *iddhp_tmp = iddhp;
	idma_chan_t * const idcp = iddhp->idh_chan;
	idma_softc_t * const sc = idcp->idc_sc;
	const unsigned int chan = idcp->idc_chan;
	idma_desc_t *iddp;

	DPRINTFN(2, ("idma_start %p\n", iddhp));
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(sc == idma_sc);
	KASSERT(idcp->idc_callback != 0);

	iddp = iddhp->idh_desc_va;
	IDDP_SANITY(idcp, iddp);

	do {
		iddhp_tmp->idh_state = IDH_QWAIT;
		iddp = iddhp_tmp->idh_desc_va;
		ctl = idma_desc_read(&iddp->idd_ctl);
		ctl &= IDMA_DESC_CTL_CNT;
		/*
		 * "The Burst Limit must be smaller than the IDMA byte count."
		 * Ensure the transfer crosses a IDMA_BURST_SIZE boundary.
		 * NOTE(review): this *replaces* a too-small byte count, so
		 * such a transfer moves more bytes than requested -- the
		 * callers apparently tolerate this; confirm.
		 */
		if (ctl <= IDMA_BURST_SIZE)
			ctl = IDMA_BURST_SIZE + sizeof(u_int32_t);

		ctl |= IDMA_DESC_CTL_OWN;	/* hand to the engine */
		idma_desc_write(&iddp->idd_ctl, ctl);
	} while ((iddhp_tmp = iddhp_tmp->idh_next) != 0);

	SIMPLEQ_INSERT_TAIL(&idcp->idc_q.idq_q, iddhp, idh_q);
	idcp->idc_q.idq_depth++;

	if (idcp->idc_active == 0)
		idma_qstart(sc, idcp, chan);
#ifdef DEBUG
	else
		DPRINTFN(2, ("idma_start: ACTIVE\n"));
#endif

	return 1;
}
1071 STATIC void
1072 idma_qstart(
1073 idma_softc_t * const sc,
1074 idma_chan_t * const idcp,
1075 const unsigned int chan)
1077 idma_desch_t *iddhp;
1079 DPRINTFN(2, ("idma_qstart %p %p %d\n", sc, idcp, chan));
1080 KASSERT(cpl >= IPL_IDMA);
1081 KASSERT(idcp->idc_active == 0);
1083 if ((iddhp = SIMPLEQ_FIRST(&idcp->idc_q.idq_q)) != NULL) {
1084 SIMPLEQ_REMOVE_HEAD(&idcp->idc_q.idq_q, iddhp, idh_q);
1085 KASSERT(iddhp->idh_state == IDH_QWAIT);
1086 idcp->idc_q.idq_depth--;
1087 idma_start_subr(sc, idcp, chan, iddhp);
1089 #ifdef DEBUG
1090 else
1091 DPRINTFN(2, ("idma_qstart: EMPTY\n"));
1092 #endif
/*
 * idma_start_subr - program the channel registers and launch one
 * (list of) transfer(s).  The channel must be idle; the descriptor
 * list is flushed to memory, the channel's next-descriptor register
 * is pointed at the head, and the default control word starts the
 * engine.  Caller must be at splidma or above.
 */
STATIC void
idma_start_subr(
	idma_softc_t * const sc,
	idma_chan_t * const idcp,
	const unsigned int chan,
	idma_desch_t * const iddhp)
{
	u_int32_t r;

	KASSERT(cpl >= IPL_IDMA);
	KASSERT(iddhp->idh_state != IDH_FREE);
	KASSERT(iddhp->idh_state != IDH_PENDING);
	KASSERT(iddhp->idh_state != IDH_DONE);
	KASSERT(iddhp->idh_state != IDH_CANCEL);
	KASSERT(iddhp->idh_state != IDH_ABORT);
	KASSERT(iddhp->idh_aux != 0);
	DPRINTFN(2, ("idma_start_subr %p %p %d %p\n", sc, idcp, chan, iddhp));

#ifdef DIAGNOSTIC
	/* the channel must be idle before we reprogram it */
	r = gt_read(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan));
	DPRINTFN(2, ("idma_start_subr: 0x%x --> 0x%x\n",
		IDMA_CTLLO_REG(chan), r));
	if ((r & IDMA_CTLLO_ACTIVE) != 0) {
		printf("idma_start_subr: IDMA_CTLLO_ACTIVE\n");
		idma_print_active(sc, chan, idcp->idc_active);
#if defined(DEBUG) && defined(DDB)
		if (idmadebug > 1)
			Debugger();
#endif
	}
	KASSERT((r & IDMA_CTLLO_ACTIVE) == 0);
#endif

	/* timestamp (timebase ticks) for the watchdog callout */
	iddhp->tb = _mftb();
	DPRINTFN(8, ("dma_start_subr: tb %lld\n", iddhp->tb));

	/* push descriptors to memory before the engine fetches them */
	IDMA_LIST_SYNC_PRE(idcp, iddhp);

	iddhp->idh_state = IDH_PENDING;
	idcp->idc_active = iddhp;

	/* point the channel at the first descriptor ... */
	gt_write(&sc->idma_gt->gt_dev, IDMA_NXT_REG(chan),
		(u_int32_t)iddhp->idh_desc_pa);
	DPRINTFN(2, ("idma_start_subr: 0x%x <-- 0x%x\n", IDMA_NXT_REG(chan),
		(u_int32_t)iddhp->idh_desc_pa));

	/* ... and go */
	r = IDMA_CTLLO_DFLT;
#ifdef NOTYET
	r |= iddhp->idh_hold;
#endif
	gt_write(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan), r);
	(void)gt_read(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan)); /* R.A.W. */

#ifdef IDMA_ABORT_TEST
	{
		/* exercise the abort path on every other transfer */
		static unsigned int want_abort = 0;

		want_abort ^= 1;
		if (want_abort) {
			idma_abort(iddhp, 0, "test abort");
		}
	}
#endif
}
1162 * idma_retry - re-start a botched transfer
1164 STATIC void
1165 idma_retry(
1166 idma_softc_t * const sc,
1167 idma_chan_t * const idcp,
1168 const unsigned int chan,
1169 idma_desch_t * const iddhp)
1171 idma_desch_t *iddhp_tmp = iddhp;
1172 idma_desc_t *iddp;
1173 u_int32_t ctl;
1175 DPRINTF(("idma_retry\n"));
1176 iddhp->idh_state = IDH_RETRY;
1177 iddhp_tmp = iddhp;
1178 do {
1179 iddp = iddhp_tmp->idh_desc_va;
1180 IDMA_CACHE_INVALIDATE((void *)iddp);
1181 IDDP_SANITY(idcp, iddp);
1182 ctl = idma_desc_read(&iddp->idd_ctl);
1183 ctl &= ~IDMA_DESC_CTL_TERM;
1184 ctl |= IDMA_DESC_CTL_OWN;
1185 idma_desc_write(&iddp->idd_ctl, ctl);
1186 } while ((iddhp_tmp = iddhp_tmp->idh_next) != 0);
1187 idma_start_subr(sc, idcp, chan, iddhp);
1191 * idma_done - complete a done transfer
1193 STATIC void
1194 idma_done(
1195 idma_softc_t * const sc,
1196 idma_chan_t * const idcp,
1197 const unsigned int chan,
1198 idma_desch_t * const iddhp,
1199 u_int32_t ccause)
1201 int (*callback)(void *, idma_desch_t *, u_int32_t);
1203 idcp->idc_active = NULL;
1204 idcp->idc_done_count++;
1205 iddhp->idh_state = IDH_DONE;
1206 idma_qstart(sc, idcp, chan);
1207 callback = idcp->idc_callback;
1208 if (callback == 0) {
1209 DIAGPRF(("%s: idma_done: chan %d no callback\n",
1210 device_xname(&sc->idma_dev), chan));
1211 idma_desch_free(iddhp);
1213 (*callback)(idcp->idc_arg, iddhp, ccause);
1216 STATIC int
1217 idma_intr0_1(void *const arg)
1219 unsigned int reg = IDMA_CAUSE_REG(0);
1220 unsigned int shift = IDMA_MASK_SHIFT(0);
1221 u_int32_t mask =
1222 IDMA_MASK(0, IDMA_MASK_BITS) | IDMA_MASK(1, IDMA_MASK_BITS);
1224 return idma_intr_comm((idma_softc_t *)arg, 0, reg, shift, mask, "0,1");
1227 STATIC int
1228 idma_intr2_3(void *const arg)
1230 unsigned int reg = IDMA_CAUSE_REG(2);
1231 unsigned int shift = IDMA_MASK_SHIFT(2);
1232 u_int32_t mask =
1233 IDMA_MASK(2, IDMA_MASK_BITS) | IDMA_MASK(3, IDMA_MASK_BITS);
1235 return idma_intr_comm((idma_softc_t *)arg, 2, reg, shift, mask, "2,3");
1238 STATIC int
1239 idma_intr4_5(void *const arg)
1241 unsigned int reg = IDMA_CAUSE_REG(4);
1242 unsigned int shift = IDMA_MASK_SHIFT(4);
1243 u_int32_t mask =
1244 IDMA_MASK(4, IDMA_MASK_BITS) | IDMA_MASK(5, IDMA_MASK_BITS);
1246 return idma_intr_comm((idma_softc_t *)arg, 4, reg, shift, mask, "4,5");
1249 STATIC int
1250 idma_intr6_7(void *const arg)
1252 unsigned int reg = IDMA_CAUSE_REG(6);
1253 unsigned int shift = IDMA_MASK_SHIFT(6);
1254 u_int32_t mask =
1255 IDMA_MASK(6, IDMA_MASK_BITS) | IDMA_MASK(7, IDMA_MASK_BITS);
1257 return idma_intr_comm((idma_softc_t *)arg, 6, reg, shift, mask, "6,7");
/*
 * idma_intr_comm - common interrupt service routine for a pair of
 * IDMA channels (chan, chan+1) that share one cause register.
 *
 * Reads and acknowledges the cause bits belonging to the pair,
 * reports any DMA error causes, then walks the two channels: for
 * each channel with pending cause bits the active descriptor chain
 * is synced and the transfer is completed, retried, or aborted
 * depending on its OWN/TERM status.
 *
 * Returns 1 if any interrupt was serviced, 0 if spurious.
 */
1260 STATIC int
1261 idma_intr_comm(
1262 idma_softc_t * const sc,
1263 unsigned int chan,
1264 unsigned int reg,
1265 unsigned int shift,
1266 u_int32_t mask,
1267 char * const str)
1269 u_int32_t rcause;
1270 u_int32_t ccause;
1271 idma_chan_t *idcp;
1272 idma_desch_t *iddhp;
1273 int limit;
/* NOTE(review): the idmalock KASSERTs look like re-entrancy
 * diagnostics rather than real mutual exclusion — confirm. */
1275 KASSERT(atomic_exch(idmalock, 1) == 0);
1276 KASSERT(cpl >= IPL_IDMA);
1277 KASSERT(sc == idma_sc);
/* read the raw cause and acknowledge only our pair's bits */
1279 rcause = gt_read(&sc->idma_gt->gt_dev, reg);
1280 rcause &= mask;
1281 gt_write(&sc->idma_gt->gt_dev, reg, ~rcause);
1282 (void)gt_read(&sc->idma_gt->gt_dev, reg); /* R.A.W. */
1284 rcause &= ~IDMA_CAUSE_RES; /* discard reserved bits */
1285 DPRINTFN(2, ("idma_intr_comm: %s rcause 0x%x\n", str, rcause));
1286 if (rcause == 0) {
/* no cause bits left: not our interrupt */
1287 KASSERT(atomic_exch(idmalock, 0) == 1);
1288 return 0;
/* report DMA errors along with the error select/address registers */
1291 if (((rcause & mask) & IDMA_INTR_ALL_ERRS) != 0) {
1292 u_int32_t err_sel;
1293 u_int32_t err_addr;
1295 err_sel = gt_read(&sc->idma_gt->gt_dev, IDMA_ESEL_REG(chan));
1296 err_addr = gt_read(&sc->idma_gt->gt_dev, IDMA_EADDR_REG(chan));
1297 DIAGPRF(("idma_intr_comm: %s rcause 0x%x sel 0x%x addr 0x%x\n",
1298 str, rcause, err_sel, err_addr));
1299 #if defined(DEBUG) && defined(DDB)
1300 if (idmadebug > 8)
1301 Debugger();
1302 #endif
/* walk the two channels of the pair; each owns IDMA_INTR_BITS
 * bits of the shifted cause word */
1305 rcause >>= shift;
1306 idcp = &sc->idma_chan[chan];
1307 limit = chan + 2;
1308 do {
1309 ccause = rcause & IDMA_INTR_BITS;
1310 rcause >>= IDMA_INTR_SHIFT;
1311 if (ccause == 0)
1312 goto next;
1314 iddhp = idcp->idc_active;
1315 if (iddhp == 0) {
/* interrupt with no active transfer: just restart the queue */
1316 DIAGPRF(("%s: idma_intr_comm: chan %d ccause 0x%x"
1317 " idc_active == 0\n",
1318 device_xname(&sc->idma_dev),
1319 chan, ccause));
1320 idma_qstart(sc, idcp, chan);
1321 goto next;
1324 DPRINTFN(2, ("idma_intr_comm: idh_state %d\n",
1325 iddhp->idh_state));
/* an aborted transfer gets re-queued from the start */
1327 if (iddhp->idh_state == IDH_ABORT) {
1328 idma_retry(sc, idcp, chan, iddhp);
1329 goto next;
1332 KASSERT(iddhp->idh_state == IDH_PENDING);
/* sync the descriptor chain and inspect its OWN/TERM status */
1334 switch (IDMA_LIST_SYNC_POST(idcp, iddhp)) {
1335 case 0:
1336 break; /* normal completion */
1337 case IDMA_DESC_CTL_OWN:
/* engine still owns a descriptor after "done": abort */
1338 DIAGPRF(("%s: idma_intr_comm: chan %d "
1339 "descriptor OWN error, abort\n",
1340 device_xname(&sc->idma_dev), chan));
1341 idma_abort(iddhp, 0, "idma_intr_comm: OWN error");
1342 goto next;
1343 case IDMA_DESC_CTL_TERM:
1344 case (IDMA_DESC_CTL_OWN|IDMA_DESC_CTL_TERM):
/* transfer was terminated (abort/timeout): retry it */
1345 DIAGPRF(("%s: idma_intr_comm: chan %d "
1346 "transfer terminated, retry\n",
1347 device_xname(&sc->idma_dev), chan));
1348 idma_retry(sc, idcp, chan, iddhp);
1349 goto next;
1352 idma_done(sc, idcp, chan, iddhp, ccause);
1354 next:
1355 if (rcause == 0)
1356 break; /* no pending bits left for the second channel */
1357 chan++;
1358 idcp++;
1359 } while (chan < limit);
1361 KASSERT(atomic_exch(idmalock, 0) == 1);
1362 return 1;
/*
 * idma_time - periodic callout watchdog for IDMA transfers
 *
 * For every allocated channel with an active transfer, compare the
 * timebase stamp recorded when the transfer was started against the
 * current timebase; if the transfer has been in flight longer than
 * the limit (tbhz >> 3, roughly 1/8 second -- see the XXX below) it
 * is aborted as timed out.  Re-arms itself once per second for as
 * long as sc->idma_callout_state remains non-zero.
 */
1365 STATIC void
1366 idma_time(void *const arg)
1368 idma_softc_t * const sc = (idma_softc_t *)arg;
1369 idma_chan_t *idcp;
1370 u_int64_t now;
1371 u_int64_t dt;
1372 u_int64_t limit;
1373 unsigned int chan;
1374 unsigned int s;
1376 KASSERT((sc == idma_sc));
1377 s = splidma(); /* block IDMA interrupts while scanning channels */
/* idma_callout_state != 0 means the watchdog is enabled; the
 * atomic_add(..., 0) is used as an atomic read */
1378 if (atomic_add(&sc->idma_callout_state, 0)) {
1379 extern u_long tbhz;
1381 KASSERT(atomic_exch(idmalock, 2) == 0);
1382 now = _mftb(); /* current timebase value */
1383 limit = tbhz >> 3; /* XXX 1/8 sec ??? */
1384 idcp = sc->idma_chan;
1385 for (chan=0; chan < NIDMA_CHANS; chan++) {
1386 if ((idcp->idc_state & IDC_ALLOC)
1387 && (idcp->idc_active != 0)) {
/* age of the in-flight transfer in timebase ticks */
1388 dt = now - idcp->idc_active->tb;
1389 if (dt > limit) {
1390 DPRINTFN(8, ("idma_time: "
1391 "now %lld, tb %lld, dt %lld\n",
1392 now, idcp->idc_active->tb, dt));
1393 idma_abort(idcp->idc_active, 0,
1394 "timed out");
1397 idcp++;
/* re-arm the watchdog for one second from now */
1399 callout_reset(&sc->idma_callout, hz, idma_time, sc);
1400 KASSERT(atomic_exch(idmalock, 0) == 2);
1402 splx(s);
/*
 * idma_print_active - debugging aid: dump the channel's DMA engine
 * registers and every descriptor in the given (active) chain.
 *
 * Assumes iddhp is non-NULL: the chain walk is a do/while that
 * dereferences the head before testing it.
 */
1405 STATIC void
1406 idma_print_active(
1407 idma_softc_t * const sc,
1408 const unsigned int chan,
1409 idma_desch_t *iddhp)
1411 idma_desc_t *iddp;
1412 u_int32_t cnt;
1413 u_int32_t src;
1414 u_int32_t dst;
1415 u_int32_t nxt;
1416 u_int32_t cur;
/* snapshot the channel's count/src/dst/next/current registers */
1418 cnt = gt_read(&sc->idma_gt->gt_dev, IDMA_CNT_REG(chan));
1419 src = gt_read(&sc->idma_gt->gt_dev, IDMA_SRC_REG(chan));
1420 dst = gt_read(&sc->idma_gt->gt_dev, IDMA_DST_REG(chan));
1421 nxt = gt_read(&sc->idma_gt->gt_dev, IDMA_NXT_REG(chan));
1422 cur = gt_read(&sc->idma_gt->gt_dev, IDMA_CUR_REG(chan));
1424 printf("%s: regs { %#x, %#x, %#x, %#x } current %#x\n",
1425 device_xname(&sc->idma_dev), cnt, src, dst, nxt, cur);
/* walk the chain, printing each descriptor's control, source,
 * destination and next-link words (va/pa of the descriptor too) */
1427 do {
1428 iddp = iddhp->idh_desc_va;
1429 printf("%s: desc %p/%p { %#x, %#x, %#x, %#x }\n",
1430 device_xname(&sc->idma_dev),
1431 iddhp->idh_desc_va, iddhp->idh_desc_pa,
1432 idma_desc_read(&iddp->idd_ctl),
1433 idma_desc_read(&iddp->idd_src_addr),
1434 idma_desc_read(&iddp->idd_dst_addr),
1435 idma_desc_read(&iddp->idd_next));
1436 iddhp = iddhp->idh_next;
1437 } while (iddhp);