1 /* $NetBSD: gtidma.c,v 1.17 2009/05/12 12:18:45 cegger Exp $ */
4 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed for the NetBSD Project by
18 * Allegro Networks, Inc., and Wasabi Systems, Inc.
19 * 4. The name of Allegro Networks, Inc. may not be used to endorse
20 * or promote products derived from this software without specific prior
22 * 5. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
26 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
27 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
28 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
29 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
41 * idma.c - GT-63269 IDMA driver
43 * creation Wed Sep 26 23:54:00 PDT 2001 cliff
46 #include <sys/cdefs.h>
47 __KERNEL_RCSID(0, "$NetBSD: gtidma.c,v 1.17 2009/05/12 12:18:45 cegger Exp $");
51 #include "opt_allegro.h"
53 #include <sys/param.h>
54 #include <sys/device.h>
55 #include <sys/inttypes.h>
56 #include <sys/callout.h>
57 #include <sys/malloc.h>
59 #include <uvm/uvm_extern.h>
61 #include <machine/psl.h>
64 #include <machine/autoconf.h>
65 #include <powerpc/atomic.h>
67 #include <dev/marvell/gtreg.h>
68 #include <dev/marvell/gtvar.h>
69 #include <dev/marvell/gtintrreg.h>
70 #include <dev/marvell/idmareg.h>
71 #include <dev/marvell/idmavar.h>
/*
 * Debug / diagnostic macros.
 * NOTE(review): the surrounding #ifdef DEBUG / #else / #endif lines are
 * missing from this extraction; the two DPRINTFN definitions below are
 * the debug and non-debug variants respectively.
 */
# define DIAGPRF(x)	printf x
# define DPRINTF(x)	do { if (idmadebug) printf x ; } while (0)
# define DPRINTFN(n, x)	do { if (idmadebug >= (n)) printf x ; } while (0)
# define STATIC static
# define DPRINTFN(n, x)
/*
 * Lock byte used by the atomic_exch() KASSERTs in the interrupt and
 * callout paths; sized and aligned to a full cache line so it never
 * shares a line with other data.
 */
unsigned char idmalock[CACHELINESIZE] __aligned(CACHELINESIZE) = { 0 };
/*
 * IDDP_SANITY - assert descriptor pointer `iddp' is size-aligned and lies
 * within channel `idcp's descriptor DMA segment.
 * NOTE(review): the macro's closing `} while (0)' and the #ifdef guards
 * around the empty non-debug variant are missing from this extraction.
 */
# define IDDP_SANITY(idcp, iddp) do { \
	vaddr_t base = idcp->idc_desc_mem.idm_map->dm_segs[0].ds_vaddr; \
	vaddr_t limit = base + idcp->idc_desc_mem.idm_map->dm_segs[0].ds_len; \
	KASSERT((((unsigned)iddp) & (sizeof(idma_desc_t) - 1)) == 0); \
	KASSERT((vaddr_t)iddp >= base); \
	KASSERT((vaddr_t)iddp < limit); \
# define IDDP_SANITY(idcp, iddp)
/*
 * IDMA_BURST_SIZE comes from opt_idma.h for now...
 * Default channel control-low value: burst code, block mode, enable,
 * fetch-next-descriptor, chained-descriptor enable, descriptor mode.
 * NOTE(review): `IDMA_CTLL0_BURSTCODE' (digit zero) looks like a typo for
 * IDMA_CTLLO_BURSTCODE -- confirm against idmareg.h before touching.
 */
#define IDMA_CTLLO_DFLT (IDMA_CTLL0_BURSTCODE(IDMA_BURST_SIZE) \
		|IDMA_CTLLO_BLKMODE \
		|IDMA_CTLLO_ENB|IDMA_CTLLO_FETCHND|IDMA_CTLLO_CDEN \
		|IDMA_CTLLO_DESCMODE)
/*
 * Read the 64-bit PowerPC timebase via the classic mftbu/mftb/mftbu
 * sequence, looping until the upper half is stable across the reads.
 * NOTE(review): the function name/signature and local declarations are
 * missing from this extraction -- presumably _mftb(void), used below by
 * idma_start_subr() and idma_time() for transfer timestamps; confirm.
 */
static inline u_int64_t
__asm volatile ("1: mftbu %0; mftb %0+1; mftbu %1; cmpw 0,%0,%1; bne 1b"
	: "=r"(tb), "=r"(scratch));
#ifndef IDMA_COHERENT
/*
 * inlines to flush, invalidate cache
 * required if DMA cache coherency is broken
 * only 1 cache line is affected, check your size & alignment
 */
#define IDMA_CACHE_FLUSH(p)		idma_cache_flush(p)
#define IDMA_CACHE_INVALIDATE(p)	idma_cache_invalidate(p)
#define IDMA_LIST_SYNC_PRE(c, p)	idma_list_sync_pre(c, p)
#define IDMA_LIST_SYNC_POST(c, p)	idma_list_sync_post(c, p)
/*
 * idma_cache_flush - write one cache line back to memory (dcbf) so the
 * IDMA engine observes CPU stores.  `p' must be cache-line aligned
 * (asserted).
 * NOTE(review): return type, braces and the asm operand list are missing
 * from this extraction.
 */
idma_cache_flush(void *p)
	KASSERT(((unsigned int)p & (CACHELINESIZE-1)) == 0);
	__asm volatile ("eieio; dcbf 0,%0; eieio; lwz %0,0(%0); sync;"
/*
 * idma_cache_invalidate - discard one cache line (dcbi) so the CPU
 * re-reads memory written by the IDMA engine.  `p' must be cache-line
 * aligned (asserted).
 * NOTE(review): return type and braces are missing from this extraction.
 */
idma_cache_invalidate(void *const p)
	KASSERT(((unsigned int)p & (CACHELINESIZE-1)) == 0);
	__asm volatile ("eieio; dcbi 0,%0; sync;" :: "r"(p));
/*
 * idma_list_sync_pre - walk a descriptor-handle chain and flush each
 * descriptor's cache line to memory before handing the list to the
 * hardware.
 * NOTE(review): return type, braces and the `iddp' declaration are
 * missing from this extraction.
 */
idma_list_sync_pre(idma_chan_t * const idcp, idma_desch_t * const iddhp)
	idma_desch_t *iddhp_tmp;

	for (iddhp_tmp = iddhp; iddhp_tmp != 0; iddhp_tmp = iddhp_tmp->idh_next) {
		iddp = iddhp_tmp->idh_desc_va;
		/*
		 * bswap32 here is for display only -- presumably the
		 * descriptors are kept byte-swapped in memory; confirm
		 * against idma_desc_read()/idma_desc_write().
		 */
		DPRINTFN(2, ("idma_list_sync_pre: "
		    "{ 0x%x, 0x%x, 0x%x, 0x%x }\n",
		    bswap32(iddp->idd_ctl),
		    bswap32(iddp->idd_src_addr),
		    bswap32(iddp->idd_dst_addr),
		    bswap32(iddp->idd_next)));
		IDDP_SANITY(idcp, iddp);
		IDMA_CACHE_FLUSH(iddhp_tmp->idh_desc_va);
/*
 * idma_list_sync_post - after the engine has run, invalidate each
 * descriptor's cache line and accumulate status bits across the list.
 * Returns the OR of the descriptors' control words masked to
 * IDMA_DESC_CTL_OWN|IDMA_DESC_CTL_TERM (0 indicates every descriptor
 * completed and was terminated cleanly).
 * NOTE(review): the `do {' opener, `rv'/`iddp' declarations, `rv'
 * initialization and the return statement are missing from this
 * extraction.
 */
static inline u_int32_t
idma_list_sync_post(idma_chan_t * const idcp, idma_desch_t *iddhp)
		iddp = iddhp->idh_desc_va;
		IDMA_CACHE_INVALIDATE((void *)iddp);
		IDDP_SANITY(idcp, iddp);
		rv |= idma_desc_read(&iddp->idd_ctl);
	} while ((iddhp = iddhp->idh_next) != 0);
	rv &= (IDMA_DESC_CTL_OWN|IDMA_DESC_CTL_TERM);
#else	/* IDMA_COHERENT */

/* Cache-coherent DMA: the flush/invalidate hooks become no-ops. */
#define IDMA_CACHE_FLUSH(p)
#define IDMA_CACHE_INVALIDATE(p)
#define IDMA_LIST_SYNC_PRE(c, p)
#define IDMA_LIST_SYNC_POST(c, p)	idma_list_sync_post(c, p)

/*
 * idma_list_sync_post - coherent variant: same OWN/TERM status
 * accumulation as the non-coherent version, minus cache invalidation.
 * NOTE(review): the `do {' opener, local declarations and return
 * statement are missing from this extraction.
 */
static inline u_int32_t
idma_list_sync_post(idma_chan_t * const idcp, idma_desch_t *iddhp)
		iddp = iddhp->idh_desc_va;
		IDDP_SANITY(idcp, iddp);
		rv |= idma_desc_read(&iddp->idd_ctl);
	} while ((iddhp = iddhp->idh_next) != 0);
	rv &= (IDMA_DESC_CTL_OWN|IDMA_DESC_CTL_TERM);

#endif	/* IDMA_COHERENT */
/* Forward declarations: autoconf glue and internal helpers. */
STATIC void idma_attach(device_t, device_t, void *);
STATIC int idma_match(device_t, cfdata_t, void *);
STATIC void idma_chan_init(idma_softc_t *, idma_chan_t *, unsigned int);
STATIC void idma_arb_init(idma_softc_t *);
STATIC void idma_dmamem_free(idma_softc_t *, idma_dmamem_t *);
STATIC int idma_dmamem_alloc(idma_softc_t *, idma_dmamem_t *, int, size_t sz);
STATIC void idma_qstart(idma_softc_t *, idma_chan_t *, unsigned int);
STATIC void idma_start_subr(idma_softc_t *, idma_chan_t *, unsigned int,
	idma_desch_t *);
STATIC void idma_retry(idma_softc_t *, idma_chan_t *, const u_int,
	idma_desch_t *);
STATIC void idma_done(idma_softc_t *, idma_chan_t *, const u_int,
	idma_desch_t *, u_int32_t);
/* One interrupt handler per IDMA channel pair. */
STATIC int idma_intr0_1(void *);
STATIC int idma_intr2_3(void *);
STATIC int idma_intr4_5(void *);
STATIC int idma_intr6_7(void *);
STATIC int idma_intr_comm(idma_softc_t *, unsigned int, unsigned int,
	unsigned int, u_int32_t, char *);
STATIC void idma_print_active(idma_softc_t *, unsigned int, idma_desch_t *);
/*
 * Autoconf attachment glue.
 * NOTE(review): the closing `};' of idma_ca is missing from this
 * extraction.
 */
struct cfattach idma_ca = {
	sizeof(struct idma_softc), idma_match, idma_attach
extern struct cfdriver idma_cd;

/* Global softc pointer, checked by KASSERTs throughout (single instance). */
idma_softc_t *idma_sc = 0;
/*
 * idma_match - autoconf match: accept only the GT child whose attach-args
 * name equals the idma driver name.
 * NOTE(review): the function header, braces and return statements are
 * missing from this extraction.
 */
	device_t const parent,
	struct gt_attach_args * const ga = (struct gt_attach_args *)aux;

	if (strcmp(ga->ga_name, idma_cd.cd_name) != 0)
/*
 * idma_attach - autoconf attach: record bus/DMA tags from the GT parent,
 * initialize all NIDMA_CHANS channels, and establish the four shared
 * interrupt handlers (one irq per channel pair, at IPL_IDMA level).
 * NOTE(review): braces, the `i'/`ih' declarations, the idma_sc/idma_gt
 * setup and the NULL checks guarding each printf(fmt, ...) failure branch
 * are missing from this extraction.
 */
	device_t const parent,
	struct gt_softc * const gtsc = device_private(parent);
	idma_softc_t * const sc = device_private(self);
	struct gt_attach_args * const ga = aux;
	const char *fmt = "%s: couldn't establish irq %d\n";

	sc->idma_bustag = ga->ga_memt;		/* XXX */
	sc->idma_dmatag = ga->ga_dmat;
	sc->idma_bushandle = 0;			/* XXX */
	sc->idma_reg_base = IDMA_CNT_REG_BASE;
	sc->idma_reg_size = 0x100;
	sc->idma_callout_state = 0;
	callout_init(&sc->idma_callout, 0);
	/* put every channel in the disabled IDC_FREE state */
	for (i = 0; i < NIDMA_CHANS; i++)
		idma_chan_init(sc, &sc->idma_chan[i], i);
	ih = intr_establish(IRQ_IDMA0_1, IST_LEVEL, IPL_IDMA, idma_intr0_1, sc);
		printf(fmt, IRQ_IDMA0_1);
	ih = intr_establish(IRQ_IDMA2_3, IST_LEVEL, IPL_IDMA, idma_intr2_3, sc);
		printf(fmt, IRQ_IDMA2_3);
	ih = intr_establish(IRQ_IDMA4_5, IST_LEVEL, IPL_IDMA, idma_intr4_5, sc);
		printf(fmt, IRQ_IDMA4_5);
	ih = intr_establish(IRQ_IDMA6_7, IST_LEVEL, IPL_IDMA, idma_intr6_7, sc);
		printf(fmt, IRQ_IDMA6_7);
	printf("%s: irpt at irqs %d, %d, %d, %d\n", device_xname(&sc->idma_dev),
	    IRQ_IDMA0_1, IRQ_IDMA2_3, IRQ_IDMA4_5, IRQ_IDMA6_7);
#ifdef IDMA_ABORT_TEST
	printf("%s: CAUTION: IDMA_ABORT_TEST enabled\n",
	    device_xname(&sc->idma_dev));
/*
 * idma_chan_init - init soft channel state && disable the channel
 *
 * Zeroes the per-channel soft state, marks it IDC_FREE, and writes the
 * channel's control-hi / control-lo registers with `r'.
 * NOTE(review): the function header, the `r' declaration/initialization
 * and the idc_sc assignment are missing from this extraction --
 * presumably r == 0 to disable the channel; confirm against the full
 * source.
 */
	idma_softc_t * const sc,
	idma_chan_t * const idcp,
	const unsigned int chan)

	DPRINTF(("idma_chan_init %d\n", chan));
	memset(idcp, 0, sizeof(idma_chan_t));
	idcp->idc_state = IDC_FREE;
	idcp->idc_chan = chan;
	idcp->idc_done_count = 0;
	idcp->idc_abort_count = 0;
	gt_write(&sc->idma_gt->gt_dev, IDMA_CTLHI_REG(chan), r);
	DPRINTFN(2, ("idma_chan_init: 0x%x <-- 0x%x\n",
	    IDMA_CTLHI_REG(chan), r));
	gt_write(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan), r);
	DPRINTFN(2, ("idma_chan_init: 0x%x <-- 0x%x\n",
	    IDMA_CTLLO_REG(chan), r));
/*
 * idma_arb_init - configure the IDMA arbitor
 *
 * Writes both arbiter registers (equal arbitration for all channels) and
 * both crossbar-timeout registers.
 * NOTE(review): the lines computing `r' before each pair of writes are
 * missing from this extraction.
 */
idma_arb_init(idma_softc_t * const sc)

	DPRINTF(("idma_arb_init %p\n", sc));
	/* all channels arbitrate equaly */
	gt_write(&sc->idma_gt->gt_dev, IDMA_ARB_REG(0), r);
	DPRINTFN(2, ("idma_arb_init: 0x%x <-- 0x%x\n", IDMA_ARB_REG(0), r));
	gt_write(&sc->idma_gt->gt_dev, IDMA_ARB_REG(1), r);
	DPRINTFN(2, ("idma_arb_init: 0x%x <-- 0x%x\n", IDMA_ARB_REG(1), r));
	/* enable cross bar timeout, w/ max timeout value */
	gt_write(&sc->idma_gt->gt_dev, IDMA_XTO_REG(0), r);
	DPRINTFN(2, ("idma_arb_init: 0x%x <-- 0x%x\n", IDMA_XTO_REG(0), r));
	gt_write(&sc->idma_gt->gt_dev, IDMA_XTO_REG(1), r);
	DPRINTFN(2, ("idma_arb_init: 0x%x <-- 0x%x\n", IDMA_XTO_REG(1), r));
/*
 * idma_chan_free - release an allocated channel: free its descriptor DMA
 * memory and handle array, then re-init it back to IDC_FREE/disabled.
 * Caller must be at interrupt priority >= IPL_IDMA (asserted via cpl).
 * NOTE(review): return type and braces are missing from this extraction.
 */
idma_chan_free(idma_chan_t * const idcp)
	idma_softc_t *sc = idcp->idc_sc;
	unsigned int chan = idcp->idc_chan;

	DPRINTF(("idma_chan_free %d\n", chan));
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(sc == idma_sc);
	KASSERT(chan < NIDMA_CHANS);
	KASSERT(idcp->idc_state != IDC_FREE);
	idma_dmamem_free(sc, &idcp->idc_desc_mem);
	free(idcp->idc_desch, M_DEVBUF);
	idma_chan_init(sc, idcp, chan);
/*
 * idma_chan_alloc - claim a free channel and set it up with `ndesc'
 * descriptors: allocates the handle array and the DMA-safe descriptor
 * memory, links handles and descriptors into circular chains (physical
 * addresses for the hardware, pointers for software), programs the
 * channel's control-hi register, records the completion callback, and
 * arms the watchdog callout (first allocation only, via atomic_exch on
 * idma_callout_state).
 * NOTE(review): the function header, several local declarations
 * (i, sz, err, r, idcp, iddhp), the channel-search failure path, the
 * malloc/dmamem error branches' conditionals and various braces are
 * missing from this extraction.
 */
	const unsigned int ndesc,
	int (* const callback)(void *, idma_desch_t *, u_int32_t),
	idma_softc_t * const sc = idma_sc;	/* XXX */
	idma_desch_t *iddhp_next;
	idma_desc_t *iddp_va;
	idma_desc_t *iddp_pa;
	STATIC void idma_time(void *);

	DPRINTF(("idma_chan_alloc %d %p %p\n", ndesc, callback, arg));
	/* find and claim the first free channel */
	for (i = 0; i < NIDMA_CHANS; i++) {
		if (sc->idma_chan[i].idc_state == IDC_FREE) {
			idcp = &sc->idma_chan[i];
			idcp->idc_state = IDC_ALLOC;
	KASSERT(idcp->idc_sc == sc);
	/*
	 * allocate descriptor handles
	 */
	sz = ndesc * sizeof(idma_desch_t);
	iddhp = (idma_desch_t *)malloc(sz, M_DEVBUF, M_NOWAIT);
	idcp->idc_desch = iddhp;
	/* malloc failure path (guard condition not visible here) */
	DIAGPRF(("idma_chan_alloc: cannot malloc 0x%x\n", sz));
	idma_chan_init(sc, idcp, idcp->idc_chan);
	/*
	 * allocate descriptors
	 */
	sz = ndesc * sizeof(idma_desc_t);
	err = idma_dmamem_alloc(sc, &idcp->idc_desc_mem, 1, sz);
	DIAGPRF(("idma_chan_alloc: cannot idma_dmamem_alloc 0x%x\n",
	idma_chan_free(idcp);
	/*
	 * clear descriptors (sanity)
	 * initialize descriptor handles
	 * link descriptors to descriptor handles
	 * link the descriptors in a circular chain using phys addr
	 * link the descriptor handles in a circular chain
	 */
	iddp_va = (idma_desc_t *)
	    idcp->idc_desc_mem.idm_map->dm_segs[0].ds_vaddr;
	iddp_pa = (idma_desc_t *)
	    idcp->idc_desc_mem.idm_map->dm_segs[0].ds_addr;
	KASSERT((((unsigned)iddp_va) & (sizeof(idma_desc_t) - 1)) == 0);
	KASSERT((((unsigned)iddp_pa) & (sizeof(idma_desc_t) - 1)) == 0);
	DPRINTFN(2, ("idma_chan_alloc: descriptors at %p/%p, handles at %p\n",
	    iddp_va, iddp_pa, idcp->idc_desch));
	memset(iddp_va, 0, sz);
	iddhp_next = iddhp + 1;
	for (i = 0; i < ndesc; i++) {
		iddhp->idh_state = IDH_FREE;
		iddhp->idh_next = iddhp_next;
		iddhp->idh_chan = idcp;
		iddhp->idh_desc_va = iddp_va++;
		iddhp->idh_desc_pa = iddp_pa++;
		iddp_va->idd_next = 0;
	IDDP_SANITY(idcp, iddp_va);
	/* close the handle ring and terminate the last descriptor */
	iddhp->idh_next = idcp->idc_desch;
	idcp->idc_desch_free = idcp->idc_desch;
	iddp_va->idd_next = 0;
	/*
	 * configure IDMA channel control hi
	 */
	r = IDMA_CTLHI_SRCPCISWAP_NONE|IDMA_CTLHI_DSTPCISWAP_NONE
	    |IDMA_CTLHI_NXTPCISWAP_NONE;
	gt_write(&sc->idma_gt->gt_dev, IDMA_CTLHI_REG(idcp->idc_chan), r);
	DPRINTFN(2, ("idma_chan_alloc: 0x%x <-- 0x%x\n",
	    IDMA_CTLHI_REG(idcp->idc_chan), r));
	/*
	 * finish initializing the channel
	 */
	idcp->idc_callback = callback;
	idcp->idc_q.idq_depth = 0;
	SIMPLEQ_INIT(&idcp->idc_q.idq_q);
	idcp->idc_ndesch = ndesc;
	idcp->idc_state |= IDC_IDLE;
	idcp->idc_active = 0;
	/* arm the watchdog callout exactly once, on the first allocation */
	if (! atomic_exch(&sc->idma_callout_state, 1))
		callout_reset(&sc->idma_callout, hz, idma_time, sc);
/*
 * idma_dmamem_free - tear down a descriptor DMA area: destroy the map,
 * unmap the KVA, free the segments, and NULL the bookkeeping pointers.
 * NOTE(review): the guard conditionals around destroy/unmap and the size
 * arguments of bus_dmamem_unmap()/bus_dmamem_free() are missing from
 * this extraction.
 */
idma_dmamem_free(idma_softc_t * const sc, idma_dmamem_t * const idmp)
	DPRINTF(("idma_dmamem_free %p %p\n", sc, idmp));
	bus_dmamap_destroy(sc->idma_dmatag, idmp->idm_map);
	bus_dmamem_unmap(sc->idma_dmatag, idmp->idm_kva,
	if (idmp->idm_nsegs > 0)
		bus_dmamem_free(sc->idma_dmatag, idmp->idm_segs,
	idmp->idm_map = NULL;
	idmp->idm_kva = NULL;
/*
 * idma_dmamem_alloc - allocate, map, and load a DMA-safe memory area for
 * channel descriptors: bus_dmamem_alloc (page-aligned, IDMA_BOUNDARY,
 * exactly one segment asserted), bus_dmamem_map into KVA (32-byte
 * alignment asserted -- enforce CACHELINESIZE alignment), then
 * bus_dmamap_create + bus_dmamap_load.  On failure of any step the DPRINTF
 * branch runs and (per the trailing call) the area is torn down with
 * idma_dmamem_free().
 * NOTE(review): the `error' checks guarding each failure branch, the
 * function return statements, and the idm_size assignment are missing
 * from this extraction.
 */
	idma_softc_t * const sc,
	idma_dmamem_t * const idmp,
	DPRINTF(("idma_dmamem_alloc %p %p %d %d\n", sc, idmp, maxsegs, sz));
	idmp->idm_maxsegs = maxsegs;
	error = bus_dmamem_alloc(sc->idma_dmatag, idmp->idm_size, PAGE_SIZE,
	    IDMA_BOUNDARY, idmp->idm_segs, idmp->idm_maxsegs,
	    &idmp->idm_nsegs, BUS_DMA_NOWAIT);
	DPRINTF(("idma_dmamem_alloc: cannot bus_dmamem_alloc\n"));
	DPRINTFN(2, ("idma_dmamem_alloc: bus_dmamem_alloc ret idm_nsegs %d\n",
	KASSERT(idmp->idm_nsegs == 1);
	error = bus_dmamem_map(sc->idma_dmatag, idmp->idm_segs, idmp->idm_nsegs,
	    idmp->idm_size, &idmp->idm_kva, BUS_DMA_NOWAIT);
	DPRINTF(("idma_dmamem_alloc: cannot bus_dmamem_map\n"));
	KASSERT((((unsigned)(idmp->idm_kva)) & 0x1f) == 0);
	/* enforce CACHELINESIZE alignment */
	error = bus_dmamap_create(sc->idma_dmatag, idmp->idm_size,
	    idmp->idm_nsegs, idmp->idm_size, IDMA_BOUNDARY,
	    BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &idmp->idm_map);
	DPRINTF(("idma_dmamem_alloc: cannot bus_dmamap_create\n"));
	error = bus_dmamap_load(sc->idma_dmatag, idmp->idm_map, idmp->idm_kva,
	    idmp->idm_size, NULL, BUS_DMA_NOWAIT);
	DPRINTF(("idma_dmamem_alloc: cannot bus_dmamap_load\n"));
	/* debug: dump the loaded segment list */
	if (idmadebug >= 2) {
		for (seg = 0; seg < idmp->idm_map->dm_nsegs; seg++) {
			DPRINTFN(2, ("idma_dmamem_alloc: "
			    "seg %d sz %ld va %lx pa %#lx\n",
			    seg, idmp->idm_map->dm_segs[seg].ds_len,
			    idmp->idm_map->dm_segs[seg].ds_vaddr,
			    idmp->idm_map->dm_segs[seg].ds_addr));
	idma_dmamem_free(sc, idmp);
/*
 * idma_intr_enb - enable IDMA irpts for given chan
 *
 * Adds this channel's interrupt bits to the soft enable mask, acks any
 * pending cause bits for the channel (writing the mask with the channel's
 * bits cleared), then programs the hardware mask register.
 * NOTE(review): the function header, the `ibits' declaration and the
 * trailing argument of the last DPRINTFN are missing from this
 * extraction.
 */
idma_intr_enb(idma_chan_t * const idcp)
	idma_softc_t * const sc = idcp->idc_sc;
	const unsigned int chan = idcp->idc_chan;

	DPRINTF(("idma_intr_enb %p chan %d\n", idcp, chan));
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(sc == idma_sc);
	KASSERT(chan < NIDMA_CHANS);
	ibits = IDMA_MASK(chan, IDMA_INTR_BITS);
	sc->idma_ien |= ibits;
	/*
	 * clear existing irpts for chan
	 */
	gt_write(&sc->idma_gt->gt_dev, IDMA_CAUSE_REG(chan),
	    (sc->idma_ien & ~ibits));
	DPRINTFN(2, ("idma_intr_enb: 0x%x <-- 0x%x\n", IDMA_CAUSE_REG(chan),
	    (sc->idma_ien & ~ibits)));
	gt_write(&sc->idma_gt->gt_dev, IDMA_MASK_REG(chan), sc->idma_ien);
	DPRINTFN(2, ("idma_intr_enb: 0x%x <-- 0x%x\n", IDMA_MASK_REG(chan),
/*
 * idma_intr_dis - disable IDMA irpts for given chan
 *
 * Clears this channel's interrupt bits from the soft enable mask (shift
 * depends on which 4-channel group the channel is in) and reprograms the
 * hardware mask register.
 * NOTE(review): the function header, the `shift' declaration and the
 * trailing argument of the final DPRINTFN are missing from this
 * extraction.
 */
idma_intr_dis(idma_chan_t *idcp)
	idma_softc_t * const sc = idcp->idc_sc;
	const unsigned int chan = idcp->idc_chan;

	DPRINTF(("idma_intr_dis %p chan %d\n", idcp, chan));
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(sc == idma_sc);
	KASSERT(chan < NIDMA_CHANS);
	shift = IDMA_INTR_SHIFT * ((chan < 4) ? chan : (chan - 4));
	sc->idma_ien &= ~(IDMA_INTR_BITS << shift);
	gt_write(&sc->idma_gt->gt_dev, IDMA_MASK_REG(chan), sc->idma_ien);
	DPRINTFN(2, ("idma_intr_dis: 0x%x <-- 0x%x\n", IDMA_MASK_REG(chan),
/*
 * idma_desch_free - free the descriptor handle
 *
 * Returns a single handle to the free ring: clears the descriptor's
 * hardware next-link, relinks idh_next to the (wrap-around) adjacent
 * handle, and marks it IDH_FREE.  A handle whose descriptor still has a
 * non-zero idd_next is part of a list and must go through
 * idma_desch_list_free() instead (asserted).
 * NOTE(review): the function header, braces and the `iddp' declaration
 * are missing from this extraction.
 */
idma_desch_free(idma_desch_t * const iddhp)
	idma_desch_t *iddhp_next;
	idma_chan_t *idcp = iddhp->idh_chan;

	DPRINTFN(2, ("idma_desch_free %p\n", iddhp));
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(iddhp->idh_state != IDH_FREE);
	KASSERT(iddhp->idh_state != IDH_QWAIT);
	KASSERT(iddhp->idh_state != IDH_PENDING);
	iddp = iddhp->idh_desc_va;
	KASSERT(iddp->idd_next == 0);	/* use idma_desch_list_free */
	idma_desc_write(&iddp->idd_next, 0);
	/* relink to the next handle, wrapping at the end of the array */
	iddhp_next = iddhp + 1;
	if (iddhp_next >= &idcp->idc_desch[idcp->idc_ndesch])
		iddhp_next = &idcp->idc_desch[0];
	iddhp->idh_next = iddhp_next;
	iddhp->idh_state = IDH_FREE;
/*
 * idma_desch_alloc - allocate the next free descriptor handle in the chain
 *
 * Takes the head of the channel's free ring; if it is not IDH_FREE the
 * ring is exhausted (failure branch not visible here).  On success the
 * handle is marked IDH_ALLOC and the free pointer advances.
 * NOTE(review): the function header, the `iddhp' declaration, braces and
 * the return statements are missing from this extraction.
 */
idma_desch_alloc(idma_chan_t * const idcp)
	DPRINTFN(2, ("idma_desch_alloc %p\n", idcp));
	KASSERT(cpl >= IPL_IDMA);
	iddhp = idcp->idc_desch_free;
	DPRINTFN(2, ("idma_desch_alloc: "
	    "idc_desch_free %p iddhp %p idh_state %d\n",
	    idcp->idc_desch_free, iddhp, iddhp->idh_state));
	if (iddhp->idh_state != IDH_FREE)
	KASSERT(iddhp->idh_next != 0);
	idcp->idc_desch_free = iddhp->idh_next;
	iddhp->idh_state = IDH_ALLOC;
/*
 * idma_desch_list_free - free the descriptor handle list
 *
 * Walks a handle list, clearing each descriptor's hardware next-link and
 * marking each handle IDH_FREE, asserting that successive handles are
 * array-adjacent (with wrap-around).  Finally relinks the tail back into
 * the free ring.
 * NOTE(review): the loop construct, `iddhp_tail' updates and braces are
 * missing from this extraction.
 */
idma_desch_list_free(idma_desch_t *iddhp)
	idma_desch_t *iddhp_tail;
	idma_chan_t * const idcp = iddhp->idh_chan;

	DPRINTFN(2, ("idma_desch_list_free %p\n", iddhp));
	KASSERT(cpl >= IPL_IDMA);
	idma_desc_write(&iddhp->idh_desc_va->idd_next, 0);
	iddhp->idh_state = IDH_FREE;
	iddhp = iddhp->idh_next;
	DPRINTFN(2, ("idma_desch_list_free: next iddhp %p\n", iddhp));
	KASSERT((iddhp == 0) || (iddhp == (iddhp_tail + 1))
	    || ((iddhp_tail == &idcp->idc_desch[idcp->idc_ndesch-1])
	    && (iddhp == &idcp->idc_desch[0])));
	/* reattach the tail to its array-adjacent successor (with wrap) */
	iddhp = iddhp_tail + 1;
	if (iddhp >= &idcp->idc_desch[idcp->idc_ndesch])
		iddhp = &idcp->idc_desch[0];
	iddhp_tail->idh_next = iddhp;
/*
 * idma_desch_list_alloc - allocate `n' linked descriptor handles
 *
 * Takes `n' consecutive handles from the free ring, chaining their
 * descriptors via physical addresses (idd_next) as it goes.  If a handle
 * is not IDH_FREE mid-walk, the partial list is terminated and released
 * via idma_desch_list_free() (bail branch).  On success the last
 * descriptor's hardware link and the tail handle's idh_next are zeroed,
 * and the free pointer advances past the allocated run.
 * NOTE(review): the function header, the loop construct around the
 * per-handle body, `iddhp_tail' maintenance and return statements are
 * missing from this extraction.
 */
idma_desch_list_alloc(idma_chan_t * const idcp, unsigned int n)
	idma_desch_t *iddhp_head;
	idma_desch_t *iddhp_tail;
	idma_desc_t *iddp_prev = 0;

	DPRINTFN(2, ("idma_desch_list_alloc %p %d\n", idcp, n));
	KASSERT(cpl >= IPL_IDMA);
	iddhp_head = iddhp_tail = iddhp = idcp->idc_desch_free;
	KASSERT(iddhp_head != 0);
	if (iddhp->idh_state != IDH_FREE) {
		DPRINTFN(2, ("idma_desch_list_alloc: "
		    "n %d iddhp %p idh_state %d, bail\n",
		    n, iddhp, iddhp->idh_state));
		iddhp_tail->idh_next = 0;
		idma_desch_list_free(iddhp_head);
	iddhp->idh_state = IDH_ALLOC;
	/* chain the previous descriptor to this one by physical address */
	idma_desc_write(&iddp_prev->idd_next,
	    (u_int32_t)iddhp->idh_desc_pa);
	iddp_prev = iddhp->idh_desc_va;
	iddhp = iddhp->idh_next;
	DPRINTFN(2, ("idma_desch_list_alloc: iddhp %p iddhp_tail %p\n",
	KASSERT((iddhp == (iddhp_tail + 1))
	    || ((iddhp_tail == &idcp->idc_desch[idcp->idc_ndesch-1])
	    && (iddhp == &idcp->idc_desch[0])));
	/* terminate the hardware chain and the handle list */
	idma_desc_write(&iddp_prev->idd_next, 0);
	iddhp_tail->idh_next = 0;
	idcp->idc_desch_free = iddhp;
/*
 * idma_intr_check - diagnostic dump of the interrupt delivery path for a
 * channel: prints the irq number, main interrupt cause (MIC), cause irq
 * mask (CIM), the ICU's ipending/imen state, and the channel's IDMA
 * cause/mask registers with the channel's bits extracted.  Pure
 * diagnostics; no state is modified.
 * NOTE(review): the `reg'/`r'/`mask' declarations, the `reg' assignments
 * before the first two gt_read()s (presumably MIC/CIM register offsets
 * derived from irqbit/gtbase) and braces are missing from this
 * extraction.
 */
idma_intr_check(idma_softc_t *sc, u_int chan)
	extern volatile imask_t ipending;
	extern volatile imask_t imen;
	extern unsigned int gtbase;
	/* channels are paired on irqs starting at 4: irq = chan/2 + 4 */
	u_int irq = (chan >> 1) + 4;
	u_int32_t irqbit = 1 << irq;

	printf("chan %d IRQ %d, ", chan, irq);
	r = gt_read(&sc->idma_gt->gt_dev, reg);
	printf("MIC %s, ", (r == 0) ? "clr" : "set");
	r = gt_read(&sc->idma_gt->gt_dev, reg);
	printf("CIM %s, ", (r == 0) ? "clr" : "set");
	r = ipending[IMASK_ICU_LO];
	printf("ipending %s, ", (r == 0) ? "clr" : "set");
	r = imen[IMASK_ICU_LO];
	printf("imen %s, ", (r == 0) ? "clr" : "set");
	mask = IDMA_MASK(chan, IDMA_MASK_BITS);
	reg = IDMA_CAUSE_REG(chan);
	r = gt_read(&sc->idma_gt->gt_dev, reg);
	printf("cause reg %#x mask %#x bits %#x (%#x), ",
	    reg, mask, r, r & mask);
	mask = IDMA_MASK(chan, IDMA_MASK_BITS);
	reg = IDMA_MASK_REG(chan);
	r = gt_read(&sc->idma_gt->gt_dev, reg);
	printf("mask reg %#x mask %#x bits %#x (%#x)\n",
	    reg, mask, r, r & mask);
#if defined(DDB) && 0
/*
 * idma_abort - abort (or cancel) the transfer described by `iddhp'.
 *
 * Detaches the handle from idc_active, collects OWN/TERM status via
 * IDMA_LIST_SYNC_POST, and examines the channel control register:
 *  - channel already idle + clean status: the transfer completed but no
 *    interrupt was seen -- run idma_intr_check() diagnostics and complete
 *    it via idma_done() (unless IDMA_ABORT_CANCEL);
 *  - otherwise the OWN/TERM combinations are diagnosed as hung/terminated;
 *  - if the channel is still active, set IDMA_CTLLO_ABORT and poll up to
 *    100 times for ABORT|ACTIVE to clear, panicking on failure.
 * Finally, unless IDMA_ABORT_CANCEL was requested, the transfer is
 * re-queued with idma_retry().
 * NOTE(review): return type, braces, the `sc'/`chan'/`r'/`sts'/`try'
 * declarations, the switch statement around the OWN/TERM cases, and
 * several return statements are missing from this extraction.
 */
idma_abort(idma_desch_t *iddhp, unsigned int flags, const char *str)
	idma_chan_t * const idcp = iddhp->idh_chan;
	idma_desch_t *iddhp_tmp;

	KASSERT(sc == idma_sc);
	chan = idcp->idc_chan;
	idcp->idc_abort_count++;
#ifndef IDMA_ABORT_TEST
	DPRINTF(("idma_abort: chan %d, desc %p, reason: \"%s\", count %ld\n",
	    chan, iddhp, str, idcp->idc_abort_count));
	DPRINTF(("idma_abort: xfers: %lu, aborts %lu\n",
	    idcp->idc_done_count,
	    idcp->idc_abort_count));
	KASSERT(cpl >= IPL_IDMA);
	DIAGPRF(("idma_abort: idh_chan NULL\n"));
	KASSERT(idcp->idc_callback != 0);
	if (idcp->idc_active != iddhp) {
		DPRINTF(("idma_abort: not pending\n"));
	idcp->idc_active = NULL;
	iddhp->idh_state = IDH_ABORT;
	sts = IDMA_LIST_SYNC_POST(idcp, iddhp);
	r = gt_read(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan));
	DPRINTF(("idma_abort: channel %s\n",
	    ((r & IDMA_CTLLO_ACTIVE) == 0) ? "idle" : "active"));
	idma_print_active(sc, chan, iddhp);
	/* channel idle + clean status: done but interrupt never arrived */
	if ((r & IDMA_CTLLO_ACTIVE) == 0) {
		DIAGPRF(("idma_abort: transfer done, no irpt\n"));
		if ((flags & IDMA_ABORT_CANCEL) == 0) {
			idma_intr_check(sc, chan);
			idma_done(sc, idcp, chan, iddhp, 1);
	DIAGPRF(("idma_abort: transfer done, hung\n"));
	/* OWN/TERM diagnosis (enclosing switch not visible here) */
	case IDMA_DESC_CTL_OWN:
		DIAGPRF(("idma_abort: transfer pending, hung\n"));
	case IDMA_DESC_CTL_TERM:
		DIAGPRF(("idma_abort: transfer done, terminated, no irpt?\n"));
	case (IDMA_DESC_CTL_OWN|IDMA_DESC_CTL_TERM):
		DIAGPRF(("idma_abort: transfer pending, terminated, hung\n"));
	if ((r & IDMA_CTLLO_ACTIVE) != 0) {
		DPRINTF(("idma_abort: channel active, aborting...\n"));
		r |= IDMA_CTLLO_ABORT;
		gt_write(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan), r);
		DPRINTFN(2, ("idma_abort: 0x%x <-- 0x%x\n",
		    IDMA_CTLLO_REG(chan), r));
		/* poll for the abort to take; bounded to 100 tries */
		for (try = 0; try < 100; try++) {
			r = gt_read(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan));
			DPRINTFN(2, ("idma_abort: 0x%x --> 0x%x\n",
			    IDMA_CTLLO_REG(chan), r));
			if ((r & (IDMA_CTLLO_ABORT|IDMA_CTLLO_ACTIVE)) == 0)
		DPRINTFN(2, ("idma_abort: tries %d\n", try));
	panic("%s: idma_abort %p failed\n",
	    device_xname(&sc->idma_dev), iddhp);
	if ((flags & IDMA_ABORT_CANCEL) == 0)
		idma_retry(sc, idcp, chan, iddhp);
/*
 * idma_qflush - cancel every transfer still waiting on the channel's
 * queue: each queued handle is removed, its IDH_QWAIT state (asserted)
 * is replaced by IDH_CANCEL, and the queue depth is reset to zero.
 * The in-flight transfer (idc_active), if any, is not touched.
 * NOTE(review): return type and braces are missing from this extraction.
 */
idma_qflush(idma_chan_t * const idcp)
	idma_desch_t *iddhp;

	DPRINTF(("idma_qflush %p\n", idcp));
	KASSERT(cpl >= IPL_IDMA);
	while ((iddhp = SIMPLEQ_FIRST(&idcp->idc_q.idq_q)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&idcp->idc_q.idq_q, iddhp, idh_q);
		KASSERT(iddhp->idh_state == IDH_QWAIT);
		iddhp->idh_state = IDH_CANCEL;
	idcp->idc_q.idq_depth = 0;
/*
 * idma_start - queue a descriptor list for transfer.
 *
 * Walks the handle list marking each IDH_QWAIT and setting the OWN bit in
 * each descriptor's control word; each byte count is bumped to at least
 * IDMA_BURST_SIZE + 4 per the chip constraint quoted below.  The list is
 * then appended to the channel queue, and if the channel is idle the
 * queue is kicked via idma_qstart().
 * NOTE(review): the `iddp'/`ctl' declarations, the `do {' opener of the
 * walk loop, and braces are missing from this extraction.
 */
idma_start(idma_desch_t * const iddhp)
	idma_desch_t *iddhp_tmp = iddhp;
	idma_chan_t * const idcp = iddhp->idh_chan;
	idma_softc_t * const sc = idcp->idc_sc;
	const unsigned int chan = idcp->idc_chan;

	DPRINTFN(2, ("idma_start %p\n", iddhp));
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(sc == idma_sc);
	KASSERT(idcp->idc_callback != 0);
	iddp = iddhp->idh_desc_va;
	IDDP_SANITY(idcp, iddp);
	iddhp_tmp->idh_state = IDH_QWAIT;
	iddp = iddhp_tmp->idh_desc_va;
	ctl = idma_desc_read(&iddp->idd_ctl);
	ctl &= IDMA_DESC_CTL_CNT;
	/*
	 * "The Burst Limit must be smaller than the IDMA byte count."
	 * Ensure the transfer crosses a IDMA_BURST_SIZE boundary.
	 */
	if (ctl <= IDMA_BURST_SIZE)
		ctl = IDMA_BURST_SIZE + sizeof(u_int32_t);
	ctl |= IDMA_DESC_CTL_OWN;
	idma_desc_write(&iddp->idd_ctl, ctl);
	} while ((iddhp_tmp = iddhp_tmp->idh_next) != 0);
	SIMPLEQ_INSERT_TAIL(&idcp->idc_q.idq_q, iddhp, idh_q);
	idcp->idc_q.idq_depth++;
	if (idcp->idc_active == 0)
		idma_qstart(sc, idcp, chan);
	DPRINTFN(2, ("idma_start: ACTIVE\n"));
/*
 * idma_qstart - if the channel is idle (idc_active == 0, asserted) and
 * the queue is non-empty, dequeue the head handle (must be IDH_QWAIT)
 * and hand it to the hardware via idma_start_subr(); otherwise report
 * EMPTY at debug level.
 * NOTE(review): the function header line and braces are missing from
 * this extraction.
 */
	idma_softc_t * const sc,
	idma_chan_t * const idcp,
	const unsigned int chan)
	idma_desch_t *iddhp;

	DPRINTFN(2, ("idma_qstart %p %p %d\n", sc, idcp, chan));
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(idcp->idc_active == 0);
	if ((iddhp = SIMPLEQ_FIRST(&idcp->idc_q.idq_q)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&idcp->idc_q.idq_q, iddhp, idh_q);
		KASSERT(iddhp->idh_state == IDH_QWAIT);
		idcp->idc_q.idq_depth--;
		idma_start_subr(sc, idcp, chan, iddhp);
	DPRINTFN(2, ("idma_qstart: EMPTY\n"));
/*
 * idma_start_subr - program the hardware to run one descriptor list.
 *
 * Asserts the channel is truly idle (dumping the active transfer if not),
 * timestamps the handle from the timebase (watchdog in idma_time() uses
 * this), flushes the descriptor list for DMA (IDMA_LIST_SYNC_PRE), marks
 * it IDH_PENDING/idc_active, loads the next-descriptor register with the
 * list's physical address, and writes the control-low register with the
 * defaults plus the handle's hold bits.  The trailing read is a
 * read-after-write flush.  Under IDMA_ABORT_TEST, periodically aborts the
 * transfer it just started to exercise the abort path.
 * NOTE(review): the function header, the `r' declaration, braces and the
 * IDMA_ABORT_TEST counter logic around `want_abort' are missing from
 * this extraction.
 */
	idma_softc_t * const sc,
	idma_chan_t * const idcp,
	const unsigned int chan,
	idma_desch_t * const iddhp)
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(iddhp->idh_state != IDH_FREE);
	KASSERT(iddhp->idh_state != IDH_PENDING);
	KASSERT(iddhp->idh_state != IDH_DONE);
	KASSERT(iddhp->idh_state != IDH_CANCEL);
	KASSERT(iddhp->idh_state != IDH_ABORT);
	KASSERT(iddhp->idh_aux != 0);
	DPRINTFN(2, ("idma_start_subr %p %p %d %p\n", sc, idcp, chan, iddhp));
	r = gt_read(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan));
	DPRINTFN(2, ("idma_start_subr: 0x%x --> 0x%x\n",
	    IDMA_CTLLO_REG(chan), r));
	if ((r & IDMA_CTLLO_ACTIVE) != 0) {
		printf("idma_start_subr: IDMA_CTLLO_ACTIVE\n");
		idma_print_active(sc, chan, idcp->idc_active);
#if defined(DEBUG) && defined(DDB)
	KASSERT((r & IDMA_CTLLO_ACTIVE) == 0);
	iddhp->tb = _mftb();
	DPRINTFN(8, ("dma_start_subr: tb %lld\n", iddhp->tb));
	IDMA_LIST_SYNC_PRE(idcp, iddhp);
	iddhp->idh_state = IDH_PENDING;
	idcp->idc_active = iddhp;
	gt_write(&sc->idma_gt->gt_dev, IDMA_NXT_REG(chan),
	    (u_int32_t)iddhp->idh_desc_pa);
	DPRINTFN(2, ("idma_start_subr: 0x%x <-- 0x%x\n", IDMA_NXT_REG(chan),
	    (u_int32_t)iddhp->idh_desc_pa));
	r = IDMA_CTLLO_DFLT;
	r |= iddhp->idh_hold;
	gt_write(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan), r);
	(void)gt_read(&sc->idma_gt->gt_dev, IDMA_CTLLO_REG(chan)); /* R.A.W. */
#ifdef IDMA_ABORT_TEST
	static unsigned int want_abort = 0;
	idma_abort(iddhp, 0, "test abort");
/*
 * idma_retry - re-start a botched transfer
 *
 * Marks the head handle IDH_RETRY, then walks the whole list clearing
 * each descriptor's TERM bit and restoring OWN so the hardware will
 * process it again, and restarts the list via idma_start_subr().
 * NOTE(review): the function header, the `iddp'/`ctl' declarations and
 * the `do {' opener of the walk loop are missing from this extraction.
 */
	idma_softc_t * const sc,
	idma_chan_t * const idcp,
	const unsigned int chan,
	idma_desch_t * const iddhp)
	idma_desch_t *iddhp_tmp = iddhp;

	DPRINTF(("idma_retry\n"));
	iddhp->idh_state = IDH_RETRY;
	iddp = iddhp_tmp->idh_desc_va;
	IDMA_CACHE_INVALIDATE((void *)iddp);
	IDDP_SANITY(idcp, iddp);
	ctl = idma_desc_read(&iddp->idd_ctl);
	ctl &= ~IDMA_DESC_CTL_TERM;
	ctl |= IDMA_DESC_CTL_OWN;
	idma_desc_write(&iddp->idd_ctl, ctl);
	} while ((iddhp_tmp = iddhp_tmp->idh_next) != 0);
	idma_start_subr(sc, idcp, chan, iddhp);
/*
 * idma_done - complete a done transfer
 *
 * Clears idc_active, bumps the completion counter, marks the handle
 * IDH_DONE, kicks the next queued transfer (idma_qstart) before invoking
 * the channel callback with the completion cause `ccause'.  If no
 * callback is registered the handle is simply freed with a diagnostic.
 * NOTE(review): the function header, the `ccause' parameter line and
 * braces are missing from this extraction.
 */
	idma_softc_t * const sc,
	idma_chan_t * const idcp,
	const unsigned int chan,
	idma_desch_t * const iddhp,
	int (*callback)(void *, idma_desch_t *, u_int32_t);

	idcp->idc_active = NULL;
	idcp->idc_done_count++;
	iddhp->idh_state = IDH_DONE;
	idma_qstart(sc, idcp, chan);
	callback = idcp->idc_callback;
	if (callback == 0) {
		DIAGPRF(("%s: idma_done: chan %d no callback\n",
		    device_xname(&sc->idma_dev), chan));
		idma_desch_free(iddhp);
	(*callback)(idcp->idc_arg, iddhp, ccause);
/*
 * idma_intr0_1 .. idma_intr6_7 - per-irq interrupt stubs.  Each irq
 * serves a pair of IDMA channels; each stub computes the cause register,
 * mask shift and mask bits for its pair and defers to idma_intr_comm().
 * NOTE(review): the `u_int32_t mask =' declaration lines, function
 * headers and braces are missing from this extraction.
 */
idma_intr0_1(void *const arg)
	unsigned int reg = IDMA_CAUSE_REG(0);
	unsigned int shift = IDMA_MASK_SHIFT(0);
	    IDMA_MASK(0, IDMA_MASK_BITS) | IDMA_MASK(1, IDMA_MASK_BITS);

	return idma_intr_comm((idma_softc_t *)arg, 0, reg, shift, mask, "0,1");

idma_intr2_3(void *const arg)
	unsigned int reg = IDMA_CAUSE_REG(2);
	unsigned int shift = IDMA_MASK_SHIFT(2);
	    IDMA_MASK(2, IDMA_MASK_BITS) | IDMA_MASK(3, IDMA_MASK_BITS);

	return idma_intr_comm((idma_softc_t *)arg, 2, reg, shift, mask, "2,3");

idma_intr4_5(void *const arg)
	unsigned int reg = IDMA_CAUSE_REG(4);
	unsigned int shift = IDMA_MASK_SHIFT(4);
	    IDMA_MASK(4, IDMA_MASK_BITS) | IDMA_MASK(5, IDMA_MASK_BITS);

	return idma_intr_comm((idma_softc_t *)arg, 4, reg, shift, mask, "4,5");

idma_intr6_7(void *const arg)
	unsigned int reg = IDMA_CAUSE_REG(6);
	unsigned int shift = IDMA_MASK_SHIFT(6);
	    IDMA_MASK(6, IDMA_MASK_BITS) | IDMA_MASK(7, IDMA_MASK_BITS);

	return idma_intr_comm((idma_softc_t *)arg, 6, reg, shift, mask, "6,7");
/*
 * idma_intr_comm - common interrupt worker for a channel pair.
 *
 * Reads and acks the shared cause register (write-back of ~rcause, with a
 * read-after-write flush), strips reserved bits, then for each channel of
 * the pair: on error bits, dumps the error select/address registers; with
 * no active transfer, logs and kicks the queue; for an IDH_ABORT handle,
 * retries it; otherwise collects OWN/TERM status from the descriptor list
 * and either completes (idma_done), aborts on an OWN error, or retries on
 * TERM.  The idmalock atomic_exch KASSERTs bracket the handler to detect
 * re-entry.
 * NOTE(review): the function header (remaining parameters), the
 * `rcause'/`ccause'/`idcp'/`limit'/`err_sel'/`err_addr' declarations,
 * the per-channel `do {' loop opener, the switch wrapper around the
 * OWN/TERM cases, the early-return path and braces are missing from this
 * extraction.
 */
	idma_softc_t * const sc,
	idma_desch_t *iddhp;

	KASSERT(atomic_exch(idmalock, 1) == 0);
	KASSERT(cpl >= IPL_IDMA);
	KASSERT(sc == idma_sc);
	rcause = gt_read(&sc->idma_gt->gt_dev, reg);
	gt_write(&sc->idma_gt->gt_dev, reg, ~rcause);
	(void)gt_read(&sc->idma_gt->gt_dev, reg);	/* R.A.W. */
	rcause &= ~IDMA_CAUSE_RES;
	DPRINTFN(2, ("idma_intr_comm: %s rcause 0x%x\n", str, rcause));
	KASSERT(atomic_exch(idmalock, 0) == 1);
	if (((rcause & mask) & IDMA_INTR_ALL_ERRS) != 0) {
		err_sel = gt_read(&sc->idma_gt->gt_dev, IDMA_ESEL_REG(chan));
		err_addr = gt_read(&sc->idma_gt->gt_dev, IDMA_EADDR_REG(chan));
		DIAGPRF(("idma_intr_comm: %s rcause 0x%x sel 0x%x addr 0x%x\n",
		    str, rcause, err_sel, err_addr));
#if defined(DEBUG) && defined(DDB)
	idcp = &sc->idma_chan[chan];
	ccause = rcause & IDMA_INTR_BITS;
	rcause >>= IDMA_INTR_SHIFT;
	iddhp = idcp->idc_active;
	DIAGPRF(("%s: idma_intr_comm: chan %d ccause 0x%x"
	    " idc_active == 0\n",
	    device_xname(&sc->idma_dev),
	idma_qstart(sc, idcp, chan);
	DPRINTFN(2, ("idma_intr_comm: idh_state %d\n",
	if (iddhp->idh_state == IDH_ABORT) {
		idma_retry(sc, idcp, chan, iddhp);
	KASSERT(iddhp->idh_state == IDH_PENDING);
	switch (IDMA_LIST_SYNC_POST(idcp, iddhp)) {
	break;	/* normal completion */
	case IDMA_DESC_CTL_OWN:
		DIAGPRF(("%s: idma_intr_comm: chan %d "
		    "descriptor OWN error, abort\n",
		    device_xname(&sc->idma_dev), chan));
		idma_abort(iddhp, 0, "idma_intr_comm: OWN error");
	case IDMA_DESC_CTL_TERM:
	case (IDMA_DESC_CTL_OWN|IDMA_DESC_CTL_TERM):
		DIAGPRF(("%s: idma_intr_comm: chan %d "
		    "transfer terminated, retry\n",
		    device_xname(&sc->idma_dev), chan));
		idma_retry(sc, idcp, chan, iddhp);
	idma_done(sc, idcp, chan, iddhp, ccause);
	} while (chan < limit);
	KASSERT(atomic_exch(idmalock, 0) == 1);
/*
 * idma_time - periodic (hz) watchdog callout.  While the controller is in
 * use (idma_callout_state), scans every allocated channel with an active
 * transfer and compares the elapsed timebase ticks against `limit'
 * (tbhz/8 -- XXX 1/8 sec ???); a transfer older than that is presumed
 * wedged and aborted via idma_abort().  Re-arms itself and uses the
 * idmalock exchange KASSERTs to detect overlap with the interrupt path.
 * NOTE(review): braces, the `chan'/`idcp'/`limit'/`now'/`dt'
 * declarations, the `now = _mftb()' read, the dt-vs-limit comparison and
 * the abort reason string are missing from this extraction.
 */
idma_time(void *const arg)
	idma_softc_t * const sc = (idma_softc_t *)arg;

	KASSERT((sc == idma_sc));
	if (atomic_add(&sc->idma_callout_state, 0)) {
	KASSERT(atomic_exch(idmalock, 2) == 0);
	limit = tbhz >> 3;	/* XXX 1/8 sec ??? */
	idcp = sc->idma_chan;
	for (chan = 0; chan < NIDMA_CHANS; chan++) {
		if ((idcp->idc_state & IDC_ALLOC)
		    && (idcp->idc_active != 0)) {
			dt = now - idcp->idc_active->tb;
			DPRINTFN(8, ("idma_time: "
			    "now %lld, tb %lld, dt %lld\n",
			    now, idcp->idc_active->tb, dt));
			idma_abort(idcp->idc_active, 0,
	callout_reset(&sc->idma_callout, hz, idma_time, sc);
	KASSERT(atomic_exch(idmalock, 0) == 2);
1407 idma_softc_t
* const sc
,
1408 const unsigned int chan
,
1409 idma_desch_t
*iddhp
)
1418 cnt
= gt_read(&sc
->idma_gt
->gt_dev
, IDMA_CNT_REG(chan
));
1419 src
= gt_read(&sc
->idma_gt
->gt_dev
, IDMA_SRC_REG(chan
));
1420 dst
= gt_read(&sc
->idma_gt
->gt_dev
, IDMA_DST_REG(chan
));
1421 nxt
= gt_read(&sc
->idma_gt
->gt_dev
, IDMA_NXT_REG(chan
));
1422 cur
= gt_read(&sc
->idma_gt
->gt_dev
, IDMA_CUR_REG(chan
));
1424 printf("%s: regs { %#x, %#x, %#x, %#x } current %#x\n",
1425 device_xname(&sc
->idma_dev
), cnt
, src
, dst
, nxt
, cur
);
1428 iddp
= iddhp
->idh_desc_va
;
1429 printf("%s: desc %p/%p { %#x, %#x, %#x, %#x }\n",
1430 device_xname(&sc
->idma_dev
),
1431 iddhp
->idh_desc_va
, iddhp
->idh_desc_pa
,
1432 idma_desc_read(&iddp
->idd_ctl
),
1433 idma_desc_read(&iddp
->idd_src_addr
),
1434 idma_desc_read(&iddp
->idd_dst_addr
),
1435 idma_desc_read(&iddp
->idd_next
));
1436 iddhp
= iddhp
->idh_next
;