/*	$NetBSD: pxa2x0_dmac.c,v 1.5 2007/03/04 05:59:38 christos Exp $	*/

/*
 * Copyright (c) 2003, 2005 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_pxa2x0_dmac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <uvm/uvm_param.h>	/* For PAGE_SIZE */

#include <machine/intr.h>
#include <machine/bus.h>

#include <dev/dmover/dmovervar.h>

#include <arm/xscale/pxa2x0reg.h>
#include <arm/xscale/pxa2x0var.h>
#include <arm/xscale/pxa2x0cpu.h>

#include <arm/xscale/pxa2x0_dmac.h>
#undef DMAC_N_PRIORITIES
#ifndef PXA2X0_DMAC_FIXED_PRIORITY
#define	DMAC_N_PRIORITIES	3
#define	DMAC_PRI(p)		(p)
#else
#define	DMAC_N_PRIORITIES	1
#define	DMAC_PRI(p)		(0)
#endif
struct dmac_desc {
	SLIST_ENTRY(dmac_desc) d_link;
	struct pxa2x0_dma_desc *d_desc;
	bus_addr_t d_desc_pa;
};
/*
 * This is used to maintain state for an in-progress transfer.
 * It tracks the current DMA segment, and offset within the segment
 * in the case where we had to split a request into several DMA
 * operations due to a shortage of DMAC descriptors.
 */
struct dmac_desc_segs {
	bus_dma_segment_t *ds_curseg;	/* Current segment */
	u_int ds_nsegs;			/* Remaining segments */
	bus_size_t ds_offset;		/* Offset within current seg */
};
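/*
 * Illustrative example (not part of the original source): assuming the
 * per-descriptor payload is clamped to (DCMD_LENGTH_MASK & ~0x1f) bytes,
 * which is 8160 if DCMD_LENGTH_MASK is 0x1fff, a single 20000-byte DMA
 * segment needs ceil(20000 / 8160) = 3 chained descriptors.  If the
 * descriptor pool runs dry after the second one, this state records
 * ds_offset == 16320 within the same ds_curseg, so dmac_continue_xfer()
 * can later resume the remaining 3680 bytes once descriptors are freed.
 */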
SIMPLEQ_HEAD(dmac_xfer_state_head, dmac_xfer_state);

struct dmac_xfer_state {
	struct dmac_xfer dxs_xfer;
#define	dxs_cookie	dxs_xfer.dx_cookie
#define	dxs_done	dxs_xfer.dx_done
#define	dxs_priority	dxs_xfer.dx_priority
#define	dxs_peripheral	dxs_xfer.dx_peripheral
#define	dxs_flow	dxs_xfer.dx_flow
#define	dxs_dev_width	dxs_xfer.dx_dev_width
#define	dxs_burst_size	dxs_xfer.dx_burst_size
#define	dxs_loop_notify	dxs_xfer.dx_loop_notify
#define	dxs_desc	dxs_xfer.dx_desc
	SIMPLEQ_ENTRY(dmac_xfer_state) dxs_link;
	SLIST_HEAD(, dmac_desc) dxs_descs;
	struct dmac_xfer_state_head *dxs_queue;
	u_int dxs_channel;
#define	DMAC_NO_CHANNEL	(~0)
	u_int32_t dxs_dcmd;
	struct dmac_desc_segs dxs_segs[2];
	bool dxs_misaligned_flag;
};
#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
/*
 * This structure is used to maintain state for the dmover(9) backend
 * part of the driver. We can have a number of concurrent dmover
 * requests in progress at any given time. The exact number is given
 * by the PXA2X0_DMAC_DMOVER_CONCURRENCY compile-time constant. One of
 * these structures is allocated for each concurrent request.
 */
struct dmac_dmover_state {
	LIST_ENTRY(dmac_dmover_state) ds_link;	/* List of idle dmover chans */
	struct pxadmac_softc *ds_sc;		/* Uplink to pxadmac softc */
	struct dmover_request *ds_current;	/* Current dmover request */
	struct dmac_xfer_state ds_xfer;
	bus_dmamap_t ds_src_dmap;
	bus_dmamap_t ds_dst_dmap;
/*
 * There is no inherent size limit in the DMA engine.
 * The following limit is somewhat arbitrary.
 */
#define	DMAC_DMOVER_MAX_XFER	(8*1024*1024)
#if 0
/* This would require 16KB * 2 just for segments... */
#define	DMAC_DMOVER_NSEGS	((DMAC_DMOVER_MAX_XFER / PAGE_SIZE) + 1)
#else
#define	DMAC_DMOVER_NSEGS	512	/* XXX: Only enough for 2MB */
#endif
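/*
 * Sizing note (added for illustration): with 4KB pages, a maximally
 * fragmented 8MB transfer could span (8*1024*1024 / 4096) + 1 = 2049
 * segments.  At roughly 8 bytes per bus_dma_segment_t that is about 16KB
 * of segment storage per map, hence the "16KB * 2" remark above.  The 512
 * segment limit used instead caps a fully fragmented transfer at
 * 512 * 4KB = 2MB, which is what the XXX comment refers to.
 */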
	bus_dma_segment_t ds_zero_seg;	/* Used for zero-fill ops */
	void *ds_zero_va;
	bus_dma_segment_t ds_fill_seg;	/* Used for fill8 ops */
	void *ds_fill_va;

#define	ds_src_addr_hold	ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_addr_hold
#define	ds_dst_addr_hold	ds_xfer.dxs_desc[DMAC_DESC_DST].xd_addr_hold
#define	ds_src_burst		ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_burst_size
#define	ds_dst_burst		ds_xfer.dxs_desc[DMAC_DESC_DST].xd_burst_size
#define	ds_src_dma_segs		ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_dma_segs
#define	ds_dst_dma_segs		ds_xfer.dxs_desc[DMAC_DESC_DST].xd_dma_segs
#define	ds_src_nsegs		ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_nsegs
#define	ds_dst_nsegs		ds_xfer.dxs_desc[DMAC_DESC_DST].xd_nsegs
};
/*
 * Overall dmover(9) backend state
 */
struct dmac_dmover {
	struct dmover_backend dd_backend;
	int dd_busy;
	LIST_HEAD(, dmac_dmover_state) dd_free;
	struct dmac_dmover_state dd_state[PXA2X0_DMAC_DMOVER_CONCURRENCY];
};
#endif
struct pxadmac_softc {
	struct device sc_dev;
	bus_space_tag_t sc_bust;
	bus_dma_tag_t sc_dmat;
	bus_space_handle_t sc_bush;
	void *sc_irqcookie;

	/*
	 * Queue of pending requests, per priority
	 */
	struct dmac_xfer_state_head sc_queue[DMAC_N_PRIORITIES];

	/*
	 * Queue of pending requests, per peripheral
	 */
	struct {
		struct dmac_xfer_state_head sp_queue;
		u_int sp_busy;
	} sc_periph[DMAC_N_PERIPH];

	/*
	 * Active requests, per channel.
	 */
	struct dmac_xfer_state *sc_active[DMAC_N_CHANNELS];

	/*
	 * Channel Priority Allocation
	 */
	struct {
		u_int8_t p_first;
		u_int8_t p_pri[DMAC_N_CHANNELS];
	} sc_prio[DMAC_N_PRIORITIES];
#define	DMAC_PRIO_END		(~0)
	u_int8_t sc_channel_priority[DMAC_N_CHANNELS];

	/*
	 * DMA descriptor management
	 */
	bus_dmamap_t sc_desc_map;
	bus_dma_segment_t sc_segs;
#define	DMAC_N_DESCS	((PAGE_SIZE * 2) / sizeof(struct pxa2x0_dma_desc))
#define	DMAC_DESCS_SIZE	(DMAC_N_DESCS * sizeof(struct pxa2x0_dma_desc))
	struct dmac_desc sc_all_descs[DMAC_N_DESCS];
	u_int sc_free_descs;
	SLIST_HEAD(, dmac_desc) sc_descs;
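	/*
	 * Sizing note (added for illustration): with the usual 4KB PAGE_SIZE
	 * and a 16-byte struct pxa2x0_dma_desc (four 32-bit words: DDADR,
	 * DSADR, DTADR, DCMD), DMAC_N_DESCS works out to (4096 * 2) / 16 =
	 * 512 descriptors, carved out of the two pages of DMA-safe memory
	 * allocated in pxadmac_attach() below.
	 */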
#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
	/*
	 * dmover(9) backend state
	 */
	struct dmac_dmover sc_dmover;
#endif
};
static int	pxadmac_match(struct device *, struct cfdata *, void *);
static void	pxadmac_attach(struct device *, struct device *, void *);

CFATTACH_DECL(pxadmac, sizeof(struct pxadmac_softc),
    pxadmac_match, pxadmac_attach, NULL, NULL);

static struct pxadmac_softc *pxadmac_sc;
static void dmac_start(struct pxadmac_softc *, dmac_priority_t);
static int dmac_continue_xfer(struct pxadmac_softc *, struct dmac_xfer_state *);
static u_int dmac_channel_intr(struct pxadmac_softc *, u_int);
static int dmac_intr(void *);

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
static void dmac_dmover_attach(struct pxadmac_softc *);
static void dmac_dmover_process(struct dmover_backend *);
static void dmac_dmover_run(struct dmover_backend *);
static void dmac_dmover_done(struct dmac_xfer *, int);
#endif
static inline u_int32_t
dmac_reg_read(struct pxadmac_softc *sc, int reg)
{

	return (bus_space_read_4(sc->sc_bust, sc->sc_bush, reg));
}

static inline void
dmac_reg_write(struct pxadmac_softc *sc, int reg, u_int32_t val)
{

	bus_space_write_4(sc->sc_bust, sc->sc_bush, reg, val);
}
static inline int
dmac_allocate_channel(struct pxadmac_softc *sc, dmac_priority_t priority,
    u_int *chanp)
{
	u_int channel;

	KDASSERT((u_int)priority < DMAC_N_PRIORITIES);

	if ((channel = sc->sc_prio[priority].p_first) == DMAC_PRIO_END)
		return (-1);
	sc->sc_prio[priority].p_first = sc->sc_prio[priority].p_pri[channel];
	*chanp = channel;

	return (0);
}
static inline void
dmac_free_channel(struct pxadmac_softc *sc, dmac_priority_t priority,
    u_int channel)
{

	KDASSERT((u_int)priority < DMAC_N_PRIORITIES);

	sc->sc_prio[priority].p_pri[channel] = sc->sc_prio[priority].p_first;
	sc->sc_prio[priority].p_first = channel;
}
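/*
 * Illustrative sketch (not part of the original source) of how the
 * p_first/p_pri arrays above behave as a stack of free channel numbers.
 * Starting from an empty priority level (p_first == DMAC_PRIO_END),
 * freeing channels 2 and then 5 leaves p_first == 5 and p_pri[5] == 2;
 * a subsequent dmac_allocate_channel() therefore hands back channel 5
 * first and leaves p_first == 2.
 */
#if 0	/* example only, never compiled */
static void
dmac_channel_stack_example(struct pxadmac_softc *sc)
{
	u_int chan;

	dmac_free_channel(sc, DMAC_PRIORITY_LOW, 2);
	dmac_free_channel(sc, DMAC_PRIORITY_LOW, 5);
	if (dmac_allocate_channel(sc, DMAC_PRIORITY_LOW, &chan) == 0)
		KDASSERT(chan == 5);	/* LIFO: most recently freed first */
}
#endif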
static int
pxadmac_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pxaip_attach_args *pxa = aux;

	if (pxadmac_sc || pxa->pxa_addr != PXA2X0_DMAC_BASE ||
	    pxa->pxa_intr != PXA2X0_INT_DMA)
		return (0);

	pxa->pxa_size = PXA2X0_DMAC_SIZE;

	return (1);
}
static void
pxadmac_attach(struct device *parent, struct device *self, void *aux)
{
	struct pxadmac_softc *sc = (struct pxadmac_softc *)self;
	struct pxaip_attach_args *pxa = aux;
	struct pxa2x0_dma_desc *dd;
	int i, nsegs;

	sc->sc_bust = pxa->pxa_iot;
	sc->sc_dmat = pxa->pxa_dmat;

	aprint_normal(": DMA Controller\n");

	if (bus_space_map(sc->sc_bust, pxa->pxa_addr, pxa->pxa_size, 0,
	    &sc->sc_bush)) {
		aprint_error("%s: Can't map registers!\n", sc->sc_dev.dv_xname);
		return;
	}

	pxadmac_sc = sc;

	/*
	 * Make sure the DMAC is quiescent
	 */
	for (i = 0; i < DMAC_N_CHANNELS; i++) {
		dmac_reg_write(sc, DMAC_DCSR(i), 0);
		dmac_reg_write(sc, DMAC_DRCMR(i), 0);
		sc->sc_active[i] = NULL;
	}
	dmac_reg_write(sc, DMAC_DINT,
	    dmac_reg_read(sc, DMAC_DINT) & DMAC_DINT_MASK);
	/*
	 * Initialise the request queues
	 */
	for (i = 0; i < DMAC_N_PRIORITIES; i++)
		SIMPLEQ_INIT(&sc->sc_queue[i]);

	/*
	 * Initialise the request queues
	 */
	for (i = 0; i < DMAC_N_PERIPH; i++) {
		sc->sc_periph[i].sp_busy = 0;
		SIMPLEQ_INIT(&sc->sc_periph[i].sp_queue);
	}

	/*
	 * Initialise the channel priority metadata
	 */
	memset(sc->sc_prio, DMAC_PRIO_END, sizeof(sc->sc_prio));
	for (i = 0; i < DMAC_N_CHANNELS; i++) {
#if (DMAC_N_PRIORITIES > 1)
		if (i <= 3)
			dmac_free_channel(sc, DMAC_PRIORITY_HIGH, i);
		else
		if (i <= 7)
			dmac_free_channel(sc, DMAC_PRIORITY_MED, i);
		else
			dmac_free_channel(sc, DMAC_PRIORITY_LOW, i);
#else
		dmac_free_channel(sc, DMAC_PRIORITY_NORMAL, i);
#endif
	}
	/*
	 * Initialise DMA descriptors and associated metadata
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, DMAC_DESCS_SIZE, DMAC_DESCS_SIZE, 0,
	    &sc->sc_segs, 1, &nsegs, BUS_DMA_NOWAIT))
		panic("dmac_pxaip_attach: bus_dmamem_alloc failed");

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_segs, 1, DMAC_DESCS_SIZE,
	    (void *)&dd, BUS_DMA_COHERENT|BUS_DMA_NOCACHE))
		panic("dmac_pxaip_attach: bus_dmamem_map failed");

	if (bus_dmamap_create(sc->sc_dmat, DMAC_DESCS_SIZE, 1,
	    DMAC_DESCS_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_desc_map))
		panic("dmac_pxaip_attach: bus_dmamap_create failed");

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_desc_map, (void *)dd,
	    DMAC_DESCS_SIZE, NULL, BUS_DMA_NOWAIT))
		panic("dmac_pxaip_attach: bus_dmamap_load failed");

	SLIST_INIT(&sc->sc_descs);
	sc->sc_free_descs = DMAC_N_DESCS;
	for (i = 0; i < DMAC_N_DESCS; i++, dd++) {
		SLIST_INSERT_HEAD(&sc->sc_descs, &sc->sc_all_descs[i], d_link);
		sc->sc_all_descs[i].d_desc = dd;
		sc->sc_all_descs[i].d_desc_pa =
		    sc->sc_segs.ds_addr + (sizeof(struct pxa2x0_dma_desc) * i);
	}

	sc->sc_irqcookie = pxa2x0_intr_establish(pxa->pxa_intr, IPL_BIO,
	    dmac_intr, sc);
	KASSERT(sc->sc_irqcookie != NULL);

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
	dmac_dmover_attach(sc);
#endif
}
#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
/*
 * We support the following dmover(9) operations
 */
static const struct dmover_algdesc dmac_dmover_algdescs[] = {
	{DMOVER_FUNC_ZERO, NULL, 0},	/* Zero-fill */
	{DMOVER_FUNC_FILL8, NULL, 0},	/* Fill with 8-bit immediate value */
	{DMOVER_FUNC_COPY, NULL, 1}	/* Copy */
};
#define	DMAC_DMOVER_ALGDESC_COUNT \
	(sizeof(dmac_dmover_algdescs) / sizeof(dmac_dmover_algdescs[0]))
static void
dmac_dmover_attach(struct pxadmac_softc *sc)
{
	struct dmac_dmover *dd = &sc->sc_dmover;
	struct dmac_dmover_state *ds;
	int i, dummy;

	/*
	 * Describe ourselves to the dmover(9) code
	 */
	dd->dd_backend.dmb_name = "pxadmac";
	dd->dd_backend.dmb_speed = 100*1024*1024;	/* XXX */
	dd->dd_backend.dmb_cookie = sc;
	dd->dd_backend.dmb_algdescs = dmac_dmover_algdescs;
	dd->dd_backend.dmb_nalgdescs = DMAC_DMOVER_ALGDESC_COUNT;
	dd->dd_backend.dmb_process = dmac_dmover_process;
	dd->dd_busy = 0;

	LIST_INIT(&dd->dd_free);

	for (i = 0; i < PXA2X0_DMAC_DMOVER_CONCURRENCY; i++) {
		ds = &dd->dd_state[i];
		ds->ds_sc = sc;
		ds->ds_current = NULL;
		ds->ds_xfer.dxs_cookie = ds;
		ds->ds_xfer.dxs_done = dmac_dmover_done;
		ds->ds_xfer.dxs_priority = DMAC_PRIORITY_NORMAL;
		ds->ds_xfer.dxs_peripheral = DMAC_PERIPH_NONE;
		ds->ds_xfer.dxs_flow = DMAC_FLOW_CTRL_NONE;
		ds->ds_xfer.dxs_dev_width = DMAC_DEV_WIDTH_DEFAULT;
		ds->ds_xfer.dxs_burst_size = DMAC_BURST_SIZE_8;	/* XXX */
		ds->ds_xfer.dxs_loop_notify = DMAC_DONT_LOOP;
		ds->ds_src_addr_hold = false;
		ds->ds_dst_addr_hold = false;
		ds->ds_src_nsegs = 0;
		ds->ds_dst_nsegs = 0;
		LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);

		/*
		 * Create dma maps for both source and destination buffers.
		 */
		if (bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER,
		    DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER,
		    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ds->ds_src_dmap) ||
		    bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER,
		    DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER,
		    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ds->ds_dst_dmap)) {
			panic("dmac_dmover_attach: bus_dmamap_create failed");
		}

		/*
		 * Allocate some dma memory to be used as source buffers
		 * for the zero-fill and fill-8 operations. We only need
		 * small buffers here, since we set up the DMAC source
		 * descriptor with 'ds_addr_hold' set to true.
		 */
		if (bus_dmamem_alloc(sc->sc_dmat,
		    arm_pdcache_line_size, arm_pdcache_line_size, 0,
		    &ds->ds_zero_seg, 1, &dummy, BUS_DMA_NOWAIT) ||
		    bus_dmamem_alloc(sc->sc_dmat,
		    arm_pdcache_line_size, arm_pdcache_line_size, 0,
		    &ds->ds_fill_seg, 1, &dummy, BUS_DMA_NOWAIT)) {
			panic("dmac_dmover_attach: bus_dmamem_alloc failed");
		}

		if (bus_dmamem_map(sc->sc_dmat, &ds->ds_zero_seg, 1,
		    arm_pdcache_line_size, &ds->ds_zero_va,
		    BUS_DMA_NOWAIT) ||
		    bus_dmamem_map(sc->sc_dmat, &ds->ds_fill_seg, 1,
		    arm_pdcache_line_size, &ds->ds_fill_va,
		    BUS_DMA_NOWAIT)) {
			panic("dmac_dmover_attach: bus_dmamem_map failed");
		}

		/*
		 * Make sure the zero-fill source buffer really is zero filled
		 */
		memset(ds->ds_zero_va, 0, arm_pdcache_line_size);
	}

	dmover_backend_register(&sc->sc_dmover.dd_backend);
}
static void
dmac_dmover_process(struct dmover_backend *dmb)
{
	struct pxadmac_softc *sc = dmb->dmb_cookie;
	int s = splbio();

	/*
	 * If the backend is currently idle, go process the queue.
	 */
	if (sc->sc_dmover.dd_busy == 0)
		dmac_dmover_run(&sc->sc_dmover.dd_backend);
	splx(s);
}
static void
dmac_dmover_run(struct dmover_backend *dmb)
{
	struct dmover_request *dreq;
	struct pxadmac_softc *sc;
	struct dmac_dmover *dd;
	struct dmac_dmover_state *ds;
	struct uio *uio;
	size_t len_src, len_dst;
	int rv;

	sc = dmb->dmb_cookie;
	dd = &sc->sc_dmover;
	sc->sc_dmover.dd_busy = 1;

	/*
	 * As long as we can queue up dmover requests...
	 */
	while ((dreq = TAILQ_FIRST(&dmb->dmb_pendreqs)) != NULL &&
	    (ds = LIST_FIRST(&dd->dd_free)) != NULL) {
		/*
		 * Pull the request off the queue, mark it 'running',
		 * and make it 'current'.
		 */
		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;
		LIST_REMOVE(ds, ds_link);
		ds->ds_current = dreq;

		switch (dreq->dreq_outbuf_type) {
		case DMOVER_BUF_LINEAR:
			len_dst = dreq->dreq_outbuf.dmbuf_linear.l_len;
			break;
		case DMOVER_BUF_UIO:
			len_dst = dreq->dreq_outbuf.dmbuf_uio->uio_resid;
			break;
		default:
			goto error;
		}

		/*
		 * Fix up the appropriate DMA 'source' buffer
		 */
		if (dreq->dreq_assignment->das_algdesc->dad_ninputs) {
			/*
			 * This is a 'copy' operation.
			 * Load up the specified source buffer
			 */
			switch (dreq->dreq_inbuf_type) {
			case DMOVER_BUF_LINEAR:
				len_src = dreq->dreq_inbuf[0].dmbuf_linear.l_len;
				if (len_src != len_dst)
					goto error;
				if (bus_dmamap_load(sc->sc_dmat,ds->ds_src_dmap,
				    dreq->dreq_inbuf[0].dmbuf_linear.l_addr,
				    len_src, NULL,
				    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
				    BUS_DMA_READ))
					goto error;
				break;

			case DMOVER_BUF_UIO:
				uio = dreq->dreq_inbuf[0].dmbuf_uio;
				len_src = uio->uio_resid;
				if (uio->uio_rw != UIO_WRITE ||
				    len_src != len_dst)
					goto error;
				if (bus_dmamap_load_uio(sc->sc_dmat,
				    ds->ds_src_dmap, uio,
				    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
				    BUS_DMA_READ))
					goto error;
				break;

			default:
				goto error;
			}

			ds->ds_src_addr_hold = false;
		} else
		if (dreq->dreq_assignment->das_algdesc->dad_name ==
		    DMOVER_FUNC_ZERO) {
			/*
			 * Zero-fill operation.
			 * Simply load up the pre-zeroed source buffer
			 */
			if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap,
			    ds->ds_zero_va, arm_pdcache_line_size, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ))
				goto error;

			ds->ds_src_addr_hold = true;
		} else
		if (dreq->dreq_assignment->das_algdesc->dad_name ==
		    DMOVER_FUNC_FILL8) {
			/*
			 * Fill-8 operation.
			 *
			 * Initialise our fill-8 buffer, and load it up.
			 *
			 * XXX: Experiment with exactly how much of the
			 * source buffer needs to be filled. Particularly WRT
			 * burst size (which is hardcoded to 8 for dmover).
			 */
			memset(ds->ds_fill_va, dreq->dreq_immediate[0],
			    arm_pdcache_line_size);

			if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap,
			    ds->ds_fill_va, arm_pdcache_line_size, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ))
				goto error;

			ds->ds_src_addr_hold = true;
		}

		/*
		 * Now do the same for the destination buffer
		 */
		switch (dreq->dreq_outbuf_type) {
		case DMOVER_BUF_LINEAR:
			if (bus_dmamap_load(sc->sc_dmat, ds->ds_dst_dmap,
			    dreq->dreq_outbuf.dmbuf_linear.l_addr,
			    len_dst, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE))
				goto error_unload_src;
			break;

		case DMOVER_BUF_UIO:
			if (dreq->dreq_outbuf.dmbuf_uio->uio_rw != UIO_READ)
				goto error_unload_src;
			if (bus_dmamap_load_uio(sc->sc_dmat, ds->ds_dst_dmap,
			    dreq->dreq_outbuf.dmbuf_uio,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE))
				goto error_unload_src;
			break;

		default:
		error_unload_src:
			bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
		error:
			dreq->dreq_error = EINVAL;
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			ds->ds_current = NULL;
			LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);
			dmover_done(dreq);
			continue;
		}

		/*
		 * The last step before shipping the request off to the
		 * DMAC driver is to sync the dma maps.
		 */
		bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0,
		    ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
		ds->ds_src_dma_segs = ds->ds_src_dmap->dm_segs;
		ds->ds_src_nsegs = ds->ds_src_dmap->dm_nsegs;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0,
		    ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);
		ds->ds_dst_dma_segs = ds->ds_dst_dmap->dm_segs;
		ds->ds_dst_nsegs = ds->ds_dst_dmap->dm_nsegs;

		/*
		 * Hand the request over to the dmac section of the driver.
		 */
		if ((rv = pxa2x0_dmac_start_xfer(&ds->ds_xfer.dxs_xfer)) != 0) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap);
			dreq->dreq_error = rv;
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			ds->ds_current = NULL;
			LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);
			dmover_done(dreq);
		}
	}

	/* All done */
	sc->sc_dmover.dd_busy = 0;
}
static void
dmac_dmover_done(struct dmac_xfer *dx, int error)
{
	struct dmac_dmover_state *ds = dx->dx_cookie;
	struct pxadmac_softc *sc = ds->ds_sc;
	struct dmover_request *dreq = ds->ds_current;

	/*
	 * A dmover(9) request has just completed.
	 */
	KDASSERT(dreq != NULL);

	/*
	 * Sync and unload the DMA maps
	 */
	bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0,
	    ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0,
	    ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
	bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap);

	ds->ds_current = NULL;
	LIST_INSERT_HEAD(&sc->sc_dmover.dd_free, ds, ds_link);

	/*
	 * Record the completion status of the transfer
	 */
	if (error) {
		dreq->dreq_error = error;
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
	} else {
		if (dreq->dreq_outbuf_type == DMOVER_BUF_UIO)
			dreq->dreq_outbuf.dmbuf_uio->uio_resid = 0;
		if (dreq->dreq_assignment->das_algdesc->dad_ninputs &&
		    dreq->dreq_inbuf_type == DMOVER_BUF_UIO)
			dreq->dreq_inbuf[0].dmbuf_uio->uio_resid = 0;
	}

	dmover_done(dreq);

	/*
	 * See if we can start some more dmover(9) requests.
	 *
	 * Note: We're already at splbio() here.
	 */
	if (sc->sc_dmover.dd_busy == 0)
		dmac_dmover_run(&sc->sc_dmover.dd_backend);
}
#endif /* PXA2X0_DMAC_DMOVER_CONCURRENCY > 0 */
struct dmac_xfer *
pxa2x0_dmac_allocate_xfer(int flags)
{
	struct dmac_xfer_state *dxs;

	dxs = malloc(sizeof(struct dmac_xfer_state), M_DEVBUF, flags);

	return ((struct dmac_xfer *)dxs);
}
void
pxa2x0_dmac_free_xfer(struct dmac_xfer *dx)
{

	/*
	 * XXX: Should verify the DMAC is not actively using this
	 * structure before freeing...
	 */
	free(dx, M_DEVBUF);
}
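/*
 * Usage note (added for illustration, see the XXX above): a client that
 * may still have the transfer queued or active should cancel it before
 * releasing the memory, e.g.:
 *
 *	pxa2x0_dmac_abort_xfer(dx);
 *	pxa2x0_dmac_free_xfer(dx);
 */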
static int
dmac_validate_desc(struct dmac_xfer_desc *xd, size_t *psize,
    bool *misaligned_flag)
{
	size_t size;
	u_int i;

	/*
	 * Make sure the transfer parameters are acceptable.
	 */
	if (xd->xd_addr_hold &&
	    (xd->xd_nsegs != 1 || xd->xd_dma_segs[0].ds_len == 0))
		return (EINVAL);

	for (i = 0, size = 0; i < xd->xd_nsegs; i++) {
		if (xd->xd_dma_segs[i].ds_addr & 0x7) {
			if (!CPU_IS_PXA270)
				return (EINVAL);
			*misaligned_flag = true;
		}
		size += xd->xd_dma_segs[i].ds_len;
	}

	*psize = size;
	return (0);
}
static int
dmac_init_desc(struct dmac_desc_segs *ds, struct dmac_xfer_desc *xd,
    size_t *psize, bool *misaligned_flag)
{
	int err;

	if ((err = dmac_validate_desc(xd, psize, misaligned_flag)))
		return (err);

	ds->ds_curseg = xd->xd_dma_segs;
	ds->ds_nsegs = xd->xd_nsegs;
	ds->ds_offset = 0;

	return (0);
}
int
pxa2x0_dmac_start_xfer(struct dmac_xfer *dx)
{
	struct pxadmac_softc *sc = pxadmac_sc;
	struct dmac_xfer_state *dxs = (struct dmac_xfer_state *)dx;
	struct dmac_xfer_desc *src, *dst;
	size_t size;
	int err, s;

	if (dxs->dxs_peripheral != DMAC_PERIPH_NONE &&
	    dxs->dxs_peripheral >= DMAC_N_PERIPH)
		return (EINVAL);

	src = &dxs->dxs_desc[DMAC_DESC_SRC];
	dst = &dxs->dxs_desc[DMAC_DESC_DST];

	dxs->dxs_misaligned_flag = false;

	if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_SRC], src, &size,
	    &dxs->dxs_misaligned_flag)))
		return (err);
	if (src->xd_addr_hold == false &&
	    dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (size % dxs->dxs_loop_notify) != 0)
		return (EINVAL);

	if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_DST], dst, &size,
	    &dxs->dxs_misaligned_flag)))
		return (err);
	if (dst->xd_addr_hold == false &&
	    dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (size % dxs->dxs_loop_notify) != 0)
		return (EINVAL);

	SLIST_INIT(&dxs->dxs_descs);
	dxs->dxs_channel = DMAC_NO_CHANNEL;
	dxs->dxs_dcmd = (((u_int32_t)dxs->dxs_dev_width) << DCMD_WIDTH_SHIFT) |
	    (((u_int32_t)dxs->dxs_burst_size) << DCMD_SIZE_SHIFT);

	switch (dxs->dxs_flow) {
	case DMAC_FLOW_CTRL_NONE:
		break;
	case DMAC_FLOW_CTRL_SRC:
		dxs->dxs_dcmd |= DCMD_FLOWSRC;
		break;
	case DMAC_FLOW_CTRL_DEST:
		dxs->dxs_dcmd |= DCMD_FLOWTRG;
		break;
	}

	if (src->xd_addr_hold == false)
		dxs->dxs_dcmd |= DCMD_INCSRCADDR;
	if (dst->xd_addr_hold == false)
		dxs->dxs_dcmd |= DCMD_INCTRGADDR;

	s = splbio();
	if (dxs->dxs_peripheral == DMAC_PERIPH_NONE ||
	    sc->sc_periph[dxs->dxs_peripheral].sp_busy == 0) {
		dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
		SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			sc->sc_periph[dxs->dxs_peripheral].sp_busy++;
		dmac_start(sc, DMAC_PRI(dxs->dxs_priority));
	} else {
		dxs->dxs_queue = &sc->sc_periph[dxs->dxs_peripheral].sp_queue;
		SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
		sc->sc_periph[dxs->dxs_peripheral].sp_busy++;
	}
	splx(s);

	return (0);
}
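/*
 * Illustrative sketch (not part of the original source) of how a client
 * is expected to drive the dmac_xfer API exported above, mirroring the
 * way the dmover(9) backend in this file sets up its own transfers.  The
 * function name example_memcpy_dma and its parameters are hypothetical;
 * only fields and constants used elsewhere in this file are assumed.
 */
#if 0	/* example only, never compiled */
static int
example_memcpy_dma(bus_dma_segment_t *src_segs, int src_nsegs,
    bus_dma_segment_t *dst_segs, int dst_nsegs,
    void (*done)(struct dmac_xfer *, int), void *cookie)
{
	struct dmac_xfer *dx;
	int error;

	dx = pxa2x0_dmac_allocate_xfer(M_NOWAIT);
	if (dx == NULL)
		return (ENOMEM);

	dx->dx_cookie = cookie;
	dx->dx_done = done;		/* invoked from the DMAC interrupt */
	dx->dx_priority = DMAC_PRIORITY_NORMAL;
	dx->dx_peripheral = DMAC_PERIPH_NONE;
	dx->dx_flow = DMAC_FLOW_CTRL_NONE;
	dx->dx_dev_width = DMAC_DEV_WIDTH_DEFAULT;
	dx->dx_burst_size = DMAC_BURST_SIZE_8;
	dx->dx_loop_notify = DMAC_DONT_LOOP;

	/* Both sides are ordinary memory buffers, so addresses increment. */
	dx->dx_desc[DMAC_DESC_SRC].xd_addr_hold = false;
	dx->dx_desc[DMAC_DESC_SRC].xd_nsegs = src_nsegs;
	dx->dx_desc[DMAC_DESC_SRC].xd_dma_segs = src_segs;
	dx->dx_desc[DMAC_DESC_DST].xd_addr_hold = false;
	dx->dx_desc[DMAC_DESC_DST].xd_nsegs = dst_nsegs;
	dx->dx_desc[DMAC_DESC_DST].xd_dma_segs = dst_segs;

	if ((error = pxa2x0_dmac_start_xfer(dx)) != 0)
		pxa2x0_dmac_free_xfer(dx);

	return (error);
}
#endif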
void
pxa2x0_dmac_abort_xfer(struct dmac_xfer *dx)
{
	struct pxadmac_softc *sc = pxadmac_sc;
	struct dmac_xfer_state *ndxs, *dxs = (struct dmac_xfer_state *)dx;
	struct dmac_desc *desc, *ndesc;
	struct dmac_xfer_state_head *queue;
	u_int32_t rv;
	int s, timeout, need_start = 0;

	s = splbio();

	queue = dxs->dxs_queue;

	if (dxs->dxs_channel == DMAC_NO_CHANNEL) {
		/*
		 * The request has not yet started, or it has already
		 * completed. If the request is not on a queue, just
		 * return.
		 */
		if (queue == NULL) {
			splx(s);
			return;
		}

		dxs->dxs_queue = NULL;
		SIMPLEQ_REMOVE(queue, dxs, dmac_xfer_state, dxs_link);
	} else {
		/*
		 * The request is in progress. This is a bit trickier.
		 */
		dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel), 0);

		for (timeout = 5000; timeout; timeout--) {
			rv = dmac_reg_read(sc, DMAC_DCSR(dxs->dxs_channel));
			if (rv & DCSR_STOPSTATE)
				break;
			delay(1);
		}

		if ((rv & DCSR_STOPSTATE) == 0)
			panic(
			   "pxa2x0_dmac_abort_xfer: channel %d failed to abort",
			    dxs->dxs_channel);

		/*
		 * Free resources allocated to the request
		 */
		for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) {
			ndesc = SLIST_NEXT(desc, d_link);
			SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link);
			sc->sc_free_descs++;
		}

		sc->sc_active[dxs->dxs_channel] = NULL;
		dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority),
		    dxs->dxs_channel);
		dxs->dxs_channel = DMAC_NO_CHANNEL;

		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0);

		dxs->dxs_queue = NULL;
		need_start = 1;
	}

	if (dxs->dxs_peripheral == DMAC_PERIPH_NONE ||
	    sc->sc_periph[dxs->dxs_peripheral].sp_busy-- == 1 ||
	    queue == &sc->sc_periph[dxs->dxs_peripheral].sp_queue)
		goto out;

	/*
	 * We've just removed the current item for this
	 * peripheral, and there is at least one more
	 * pending item waiting. Make it current.
	 */
	ndxs = SIMPLEQ_FIRST(&sc->sc_periph[dxs->dxs_peripheral].sp_queue);
	dxs = ndxs;
	KDASSERT(dxs != NULL);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_periph[dxs->dxs_peripheral].sp_queue,
	    dxs_link);

	dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
	SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
	need_start = 1;

 out:
	/*
	 * Try to start any pending requests with the same
	 * priority as the one we just aborted.
	 */
	if (need_start)
		dmac_start(sc, DMAC_PRI(dxs->dxs_priority));

	splx(s);
}
static void
dmac_start(struct pxadmac_softc *sc, dmac_priority_t priority)
{
	struct dmac_xfer_state *dxs;
	u_int32_t dalgn;
	u_int channel;

	while (sc->sc_free_descs &&
	    (dxs = SIMPLEQ_FIRST(&sc->sc_queue[priority])) != NULL &&
	    dmac_allocate_channel(sc, priority, &channel) == 0) {
		/*
		 * Yay, got some descriptors, a transfer request, and
		 * an available DMA channel.
		 */
		KDASSERT(sc->sc_active[channel] == NULL);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue[priority], dxs_link);

		/* set DMA alignment register */
		dalgn = dmac_reg_read(sc, DMAC_DALGN);
		dalgn &= ~(1U << channel);
		if (dxs->dxs_misaligned_flag)
			dalgn |= (1U << channel);
		dmac_reg_write(sc, DMAC_DALGN, dalgn);

		dxs->dxs_channel = channel;
		sc->sc_active[channel] = dxs;
		(void) dmac_continue_xfer(sc, dxs);
		/*
		 * XXX: Deal with descriptor allocation failure for loops
		 */
	}
}
static int
dmac_continue_xfer(struct pxadmac_softc *sc, struct dmac_xfer_state *dxs)
{
	struct dmac_desc *desc, *prev_desc;
	struct pxa2x0_dma_desc *dd;
	struct dmac_desc_segs *src_ds, *dst_ds;
	struct dmac_xfer_desc *src_xd, *dst_xd;
	bus_dma_segment_t *src_seg, *dst_seg;
	bus_addr_t src_mem_addr, dst_mem_addr;
	bus_size_t src_size, dst_size, this_size;

	desc = NULL;
	prev_desc = NULL;
	dd = NULL;
	src_ds = &dxs->dxs_segs[DMAC_DESC_SRC];
	dst_ds = &dxs->dxs_segs[DMAC_DESC_DST];
	src_xd = &dxs->dxs_desc[DMAC_DESC_SRC];
	dst_xd = &dxs->dxs_desc[DMAC_DESC_DST];
	SLIST_INIT(&dxs->dxs_descs);

	/*
	 * As long as the source/destination buffers have DMA segments,
	 * and we have free descriptors, build a DMA chain.
	 */
	while (src_ds->ds_nsegs && dst_ds->ds_nsegs && sc->sc_free_descs) {
		src_seg = src_ds->ds_curseg;
		src_mem_addr = src_seg->ds_addr + src_ds->ds_offset;
		if (src_xd->xd_addr_hold == false &&
		    dxs->dxs_loop_notify != DMAC_DONT_LOOP)
			src_size = dxs->dxs_loop_notify;
		else
			src_size = src_seg->ds_len - src_ds->ds_offset;

		dst_seg = dst_ds->ds_curseg;
		dst_mem_addr = dst_seg->ds_addr + dst_ds->ds_offset;
		if (dst_xd->xd_addr_hold == false &&
		    dxs->dxs_loop_notify != DMAC_DONT_LOOP)
			dst_size = dxs->dxs_loop_notify;
		else
			dst_size = dst_seg->ds_len - dst_ds->ds_offset;

		/*
		 * We may need to split a source or destination segment
		 * across two or more DMAC descriptors.
		 */
		while (src_size && dst_size &&
		    (desc = SLIST_FIRST(&sc->sc_descs)) != NULL) {
			SLIST_REMOVE_HEAD(&sc->sc_descs, d_link);
			sc->sc_free_descs--;

			/*
			 * Decide how much data we're going to transfer
			 * using this DMAC descriptor.
			 */
			if (src_xd->xd_addr_hold)
				this_size = dst_size;
			else
			if (dst_xd->xd_addr_hold)
				this_size = src_size;
			else
				this_size = min(dst_size, src_size);

			/*
			 * But clamp the transfer size to the DMAC
			 * descriptor's maximum.
			 */
			this_size = min(this_size, DCMD_LENGTH_MASK & ~0x1f);
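			/*
			 * Illustrative note (added): DCMD_LENGTH is a 13-bit
			 * field, so if DCMD_LENGTH_MASK is 0x1fff the clamp
			 * above limits each descriptor to 8160 bytes, the
			 * largest 32-byte-aligned value below 8KB.  Longer
			 * segments are simply described by several chained
			 * descriptors on later iterations of this loop.
			 */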
			/*
			 * Fill in the DMAC descriptor
			 */
			dd = desc->d_desc;
			dd->dd_dsadr = src_mem_addr;
			dd->dd_dtadr = dst_mem_addr;
			dd->dd_dcmd = dxs->dxs_dcmd | this_size;

			/*
			 * Link it into the chain
			 */
			if (prev_desc) {
				SLIST_INSERT_AFTER(prev_desc, desc, d_link);
				prev_desc->d_desc->dd_ddadr = desc->d_desc_pa;
			} else
				SLIST_INSERT_HEAD(&dxs->dxs_descs, desc,
				    d_link);
			prev_desc = desc;

			/*
			 * Update the source/destination pointers
			 */
			if (src_xd->xd_addr_hold == false) {
				src_size -= this_size;
				src_ds->ds_offset += this_size;
				if (src_ds->ds_offset == src_seg->ds_len) {
					KDASSERT(src_size == 0);
					src_ds->ds_curseg = ++src_seg;
					src_ds->ds_offset = 0;
					src_ds->ds_nsegs--;
				} else
					src_mem_addr += this_size;
			}

			if (dst_xd->xd_addr_hold == false) {
				dst_size -= this_size;
				dst_ds->ds_offset += this_size;
				if (dst_ds->ds_offset == dst_seg->ds_len) {
					KDASSERT(dst_size == 0);
					dst_ds->ds_curseg = ++dst_seg;
					dst_ds->ds_offset = 0;
					dst_ds->ds_nsegs--;
				} else
					dst_mem_addr += this_size;
			}
		}

		if (dxs->dxs_loop_notify != DMAC_DONT_LOOP) {
			/*
			 * We must be able to allocate descriptors for the
			 * entire loop. Otherwise, return them to the pool
			 * and try again later.
			 */
			if (desc == NULL) {
				struct dmac_desc *ndesc;

				for (desc = SLIST_FIRST(&dxs->dxs_descs);
				    desc; desc = ndesc) {
					ndesc = SLIST_NEXT(desc, d_link);
					SLIST_INSERT_HEAD(&sc->sc_descs, desc,
					    d_link);
					sc->sc_free_descs++;
				}
				return (0);
			}

			KASSERT(dd != NULL);
			dd->dd_dcmd |= DCMD_ENDIRQEN;
		}
	}

	/*
	 * Did we manage to build a chain?
	 * If not, just return.
	 */
	if (dd == NULL)
		return (0);

	if (dxs->dxs_loop_notify == DMAC_DONT_LOOP) {
		dd->dd_dcmd |= DCMD_ENDIRQEN;
		dd->dd_ddadr = DMAC_DESC_LAST;
	} else
		dd->dd_ddadr = SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa;

	if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) {
		dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral),
		    dxs->dxs_channel | DRCMR_MAPVLD);
	}
	dmac_reg_write(sc, DMAC_DDADR(dxs->dxs_channel),
	    SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa);
	dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel),
	    DCSR_ENDINTR | DCSR_RUN);

	return (1);
}
static u_int
dmac_channel_intr(struct pxadmac_softc *sc, u_int channel)
{
	struct dmac_xfer_state *dxs;
	struct dmac_desc *desc, *ndesc;
	u_int32_t dcsr;
	u_int rv = 0;

	dcsr = dmac_reg_read(sc, DMAC_DCSR(channel));
	dmac_reg_write(sc, DMAC_DCSR(channel), dcsr);
	if (dmac_reg_read(sc, DMAC_DCSR(channel)) & DCSR_STOPSTATE)
		dmac_reg_write(sc, DMAC_DCSR(channel), dcsr & ~DCSR_RUN);

	if ((dxs = sc->sc_active[channel]) == NULL) {
		printf("%s: Stray DMAC interrupt for unallocated channel %d\n",
		    sc->sc_dev.dv_xname, channel);
		return (0);
	}

	/*
	 * Clear down the interrupt in the DMA Interrupt Register
	 */
	dmac_reg_write(sc, DMAC_DINT, (1u << channel));

	/*
	 * If this is a looping request, invoke the 'done' callback and
	 * return immediately.
	 */
	if (dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (dcsr & DCSR_BUSERRINTR) == 0) {
		(dxs->dxs_done)(&dxs->dxs_xfer, 0);
		return (0);
	}

	/*
	 * Free the descriptors allocated to the completed transfer
	 *
	 * XXX: If there is more data to transfer in this request,
	 * we could simply reuse some or all of the descriptors
	 * already allocated for the transfer which just completed.
	 */
	for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) {
		ndesc = SLIST_NEXT(desc, d_link);
		SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link);
		sc->sc_free_descs++;
	}

	if ((dcsr & DCSR_BUSERRINTR) || dmac_continue_xfer(sc, dxs) == 0) {
		/*
		 * The transfer completed (possibly due to an error),
		 * -OR- we were unable to continue any remaining
		 * segment of the transfer due to a lack of descriptors.
		 *
		 * In either case, we have to free up DMAC resources
		 * allocated to the request.
		 */
		sc->sc_active[channel] = NULL;
		dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority), channel);
		dxs->dxs_channel = DMAC_NO_CHANNEL;
		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0);

		if (dxs->dxs_segs[DMAC_DESC_SRC].ds_nsegs == 0 ||
		    dxs->dxs_segs[DMAC_DESC_DST].ds_nsegs == 0 ||
		    (dcsr & DCSR_BUSERRINTR)) {
			/*
			 * The transfer is complete.
			 */
			dxs->dxs_queue = NULL;
			rv = 1u << DMAC_PRI(dxs->dxs_priority);

			if (dxs->dxs_peripheral != DMAC_PERIPH_NONE &&
			    --sc->sc_periph[dxs->dxs_peripheral].sp_busy != 0) {
				struct dmac_xfer_state *ndxs;
				/*
				 * We've just removed the current item for this
				 * peripheral, and there is at least one more
				 * pending item waiting. Make it current.
				 */
				ndxs = SIMPLEQ_FIRST(
				    &sc->sc_periph[dxs->dxs_peripheral].sp_queue);
				KDASSERT(ndxs != NULL);
				SIMPLEQ_REMOVE_HEAD(
				    &sc->sc_periph[dxs->dxs_peripheral].sp_queue,
				    dxs_link);

				ndxs->dxs_queue =
				    &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
				SIMPLEQ_INSERT_TAIL(ndxs->dxs_queue, ndxs,
				    dxs_link);
			}

			(dxs->dxs_done)(&dxs->dxs_xfer,
			    (dcsr & DCSR_BUSERRINTR) ? EFAULT : 0);
		} else {
			/*
			 * The request is not yet complete, but we were unable
			 * to make any headway at this time because there are
			 * no free descriptors. Put the request back at the
			 * head of the appropriate priority queue. It'll be
			 * dealt with as other in-progress transfers complete.
			 */
			SIMPLEQ_INSERT_HEAD(
			    &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)], dxs,
			    dxs_link);
		}
	}

	return (rv);
}
static int
dmac_intr(void *arg)
{
	struct pxadmac_softc *sc = arg;
	u_int32_t rv, mask;
	u_int chan, pri;

	rv = dmac_reg_read(sc, DMAC_DINT);
	if ((rv & DMAC_DINT_MASK) == 0)
		return (0);

	/*
	 * Deal with completed transfers
	 */
	for (chan = 0, mask = 1u, pri = 0;
	    chan < DMAC_N_CHANNELS; chan++, mask <<= 1) {
		if (rv & mask)
			pri |= dmac_channel_intr(sc, chan);
	}

	/*
	 * Now try to start any queued transfers
	 */
#if (DMAC_N_PRIORITIES > 1)
	if (pri & (1u << DMAC_PRIORITY_HIGH))
		dmac_start(sc, DMAC_PRIORITY_HIGH);
	if (pri & (1u << DMAC_PRIORITY_MED))
		dmac_start(sc, DMAC_PRIORITY_MED);
	if (pri & (1u << DMAC_PRIORITY_LOW))
		dmac_start(sc, DMAC_PRIORITY_LOW);
#else
	if (pri)
		dmac_start(sc, DMAC_PRIORITY_NORMAL);
#endif

	return (1);
}