/*	$NetBSD: bus_dma.c,v 1.19 2009/03/14 21:04:14 dsl Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.19 2009/03/14 21:04:14 dsl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#define _POWERPC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/cpu.h>		/* for CACHELINESIZE */
#ifdef DEBUG
int	busdmadebug = 0;
# define DPRINTF(x)	do { if (busdmadebug) printf x ; } while (0)
#else
# define DPRINTF(x)
#endif
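
/*
 * Cache maintenance helpers used by _bus_dmamap_sync().  bus_dma on this
 * port provides software coherency with the PowerPC data cache:
 *
 *	storedcache (dcbst)	write dirty lines back to memory, leaving
 *				them valid in the cache
 *	flushdcache (dcbf)	write dirty lines back and invalidate them
 *	invaldcache (dcbi)	invalidate lines without writing them back
 *
 * eieio orders the cache operations with respect to other storage
 * accesses, and sync waits for them to complete.
 */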
static void
invaldcache(vaddr_t va, bus_size_t sz)
{
	u_int off;

	DPRINTF(("invaldcache: %#lx %ld\n", va, (long) sz));

	__asm volatile("eieio;");
	off = (u_int)va & (curcpu()->ci_ci.dcache_line_size - 1);
	va -= off;
	sz += off;
	while ((int)sz > 0) {
		__asm volatile("dcbi 0, %0;" :: "r"(va));
		va += curcpu()->ci_ci.dcache_line_size;
		sz -= curcpu()->ci_ci.dcache_line_size;
	}
	__asm volatile("sync;");
}
static void
flushdcache(vaddr_t va, bus_size_t sz)
{
	u_int off;

	DPRINTF(("flushdcache: %#lx %ld\n", va, (long) sz));

	__asm volatile("eieio;");
	off = (u_int)va & (curcpu()->ci_ci.dcache_line_size - 1);
	va -= off;
	sz += off;
	while ((int)sz > curcpu()->ci_ci.dcache_line_size) {
		__asm volatile("dcbf 0, %0;" :: "r"(va));
		va += curcpu()->ci_ci.dcache_line_size;
		sz -= curcpu()->ci_ci.dcache_line_size;
	}
	/*
	 * eieio ensures the last cache line flushed is ordered last;
	 * read-after-write ensures the last cache line
	 * (and therefore all cache lines) made it to memory.
	 */
	__asm volatile("eieio; dcbf 0, %0;" :: "r"(va));
	__asm volatile("lwz %0,0(%0); sync;" : "+r"(va));
}
static void
storedcache(vaddr_t va, bus_size_t sz)
{
	u_int off;

	DPRINTF(("storedcache: %#lx %ld\n", va, (long) sz));

	__asm volatile("eieio;");
	off = (u_int)va & (curcpu()->ci_ci.dcache_line_size - 1);
	va -= off;
	sz += off;
	while ((int)sz > 0) {
		__asm volatile("dcbst 0, %0;" :: "r"(va));
		va += curcpu()->ci_ci.dcache_line_size;
		sz -= curcpu()->ci_ci.dcache_line_size;
	}
	__asm volatile("sync;");
}
int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int, paddr_t *, int *, int);
/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct powerpc_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct powerpc_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct powerpc_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_bounce_thresh = t->_bounce_thresh;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
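
/*
 * Illustrative only (not part of the original file): a driver using this
 * back-end typically creates a map once and then loads, syncs, and unloads
 * it per transfer.  The tag "sc->sc_dmat", the map "sc->sc_dmam", and the
 * buffer names below are hypothetical placeholders.
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_WAITOK, &sc->sc_dmam);
 *	...
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmam, buf, len,
 *	    NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam, 0, len,
 *	    BUS_DMASYNC_PREWRITE);
 *	... start the transfer (device reads memory) ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam, 0, len,
 *	    BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmam);
 */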
/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, (paddr_t *)&curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->_dm_bounce_thresh != 0 &&
		    curaddr >= map->_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg].ds_vaddr = vaddr;
			first = 0;
		} else {
			if ((curaddr == lastaddr) &&
			    (vaddr == map->dm_segs[seg].ds_vaddr +
			     (curaddr - map->dm_segs[seg].ds_addr)) &&
			    ((map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz) &&
			    ((map->_dm_boundary == 0) ||
			     ((map->dm_segs[seg].ds_addr & bmask) ==
			      (curaddr & bmask)))) {
				map->dm_segs[seg].ds_len += sgsize;
			} else {
				if (++seg >= map->_dm_segcnt) {
					panic("_bus_dmamap_load_buffer: "
					    "seg %d >= _dm_segcnt %d\n",
					    seg, map->_dm_segcnt);
				}
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg].ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}
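
/*
 * Example of the boundary clamp above (illustrative numbers): with
 * _dm_boundary = 0x10000 (64KB), bmask = ~0xffff.  For curaddr = 0x2fff0,
 * baddr = (0x2fff0 + 0x10000) & bmask = 0x30000, so sgsize is clamped to
 * baddr - curaddr = 0x10 and the segment stops at the 64KB boundary;
 * the remaining bytes start a new segment on the next iteration.
 */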
/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	struct vmspace *vm;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
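
/*
 * Illustrative only: a network driver would typically load an outgoing
 * packet chain with bus_dmamap_load_mbuf(), sync the map with
 * BUS_DMASYNC_PREWRITE before handing it to the device, and unload it
 * once the transmit completes.  The names below are hypothetical.
 *
 *	if (bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, txmap, 0,
 *		    txmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
 *		... program the device with txmap->dm_segs[] ...
 *	}
 */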
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}
/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
/*
 * DMA map synchronization, provides software coherency.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen, dslen;
	vaddr_t va;
	int i;

	DPRINTF(("_bus_dmamap_sync %p %p %#lx %ld %#x\n",
	    t, map, (unsigned long) offset, (unsigned long) len, ops));

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if (((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0) &&
	    ((ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0))
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %#lx (map size is %#lx)",
		    (unsigned long) offset, (unsigned long) map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	switch (ops) {
	case BUS_DMASYNC_PREWRITE:
		for (i=0; i < map->dm_nsegs && len != 0; i++) {
			/* find the beginning segment */
			dslen = map->dm_segs[i].ds_len;
			if (offset >= dslen) {
				offset -= dslen;
				continue;
			}
			dslen -= offset;
			minlen = len < dslen ? len : dslen;

			storedcache(map->dm_segs[i].ds_vaddr + offset, minlen);

			offset = 0;
			len -= minlen;
		}
		break;

	case BUS_DMASYNC_PREREAD:
		for (i=0; i < map->dm_nsegs && len != 0; i++) {
			/* find the beginning segment */
			dslen = map->dm_segs[i].ds_len;
			if (offset >= dslen) {
				offset -= dslen;
				continue;
			}
			dslen -= offset;
			minlen = len < dslen ? len : dslen;

			/* flush partial lines at the edges before invalidating */
			va = map->dm_segs[i].ds_vaddr + offset;
			if (va & (curcpu()->ci_ci.dcache_line_size-1))
				flushdcache(va, 1);
			va += minlen;
			if (va & (curcpu()->ci_ci.dcache_line_size-1))
				flushdcache(va, 1);

			invaldcache(map->dm_segs[i].ds_vaddr + offset, minlen);

			offset = 0;
			len -= minlen;
		}
		break;

	case BUS_DMASYNC_POSTREAD:
		for (i=0; i < map->dm_nsegs && len != 0; i++) {
			/* find the beginning segment */
			dslen = map->dm_segs[i].ds_len;
			if (offset >= dslen) {
				offset -= dslen;
				continue;
			}
			dslen -= offset;
			minlen = len < dslen ? len : dslen;

			invaldcache(map->dm_segs[i].ds_vaddr + offset, minlen);

			offset = 0;
			len -= minlen;
		}
		break;

	case BUS_DMASYNC_POSTWRITE:
		break;

	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		for (i=0; i < map->dm_nsegs && len != 0; i++) {
			/* find the beginning segment */
			dslen = map->dm_segs[i].ds_len;
			if (offset >= dslen) {
				offset -= dslen;
				continue;
			}
			dslen -= offset;
			minlen = len < dslen ? len : dslen;

			flushdcache(map->dm_segs[i].ds_vaddr + offset, minlen);

			offset = 0;
			len -= minlen;
		}
		break;

	case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
		for (i=0; i < map->dm_nsegs && len != 0; i++) {
			/* find the beginning segment */
			dslen = map->dm_segs[i].ds_len;
			if (offset >= dslen) {
				offset -= dslen;
				continue;
			}
			dslen -= offset;
			minlen = len < dslen ? len : dslen;

			invaldcache(map->dm_segs[i].ds_vaddr + offset, minlen);

			offset = 0;
			len -= minlen;
		}
		break;
	}
}
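
/*
 * Note on BUS_DMASYNC_PREREAD above: dcbi discards whole cache lines, so
 * when the buffer does not begin and end on dcache_line_size boundaries
 * the partial lines at the edges must be written back (flushdcache) before
 * the invalidate.  Otherwise dirty data belonging to adjacent variables
 * that happen to share those cache lines would be thrown away along with
 * the buffer.
 */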
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	paddr_t avail_start = 0xffffffff, avail_end = 0;
	paddr_t curaddr, lastaddr, high;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error, bank;

	for (bank = 0; bank < vm_nphysseg; bank++) {
		if (avail_start > vm_physmem[bank].avail_start << PGSHIFT)
			avail_start = vm_physmem[bank].avail_start << PGSHIFT;
		if (avail_end < vm_physmem[bank].avail_end << PGSHIFT)
			avail_end = vm_physmem[bank].avail_end << PGSHIFT;
	}

	/* Always round the size. */
	size = round_page(size);

	high = avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, avail_start, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	segs[curseg].ds_vaddr = (vaddr_t)0xdeadbeef;
	m = m->pageq.queue.tqe_next;

	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg].ds_vaddr = (vaddr_t)0xdeadbeef;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
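
/*
 * Example of the segment merging above (illustrative addresses): if
 * uvm_pglistalloc() returns pages at 0x10000, 0x11000 and 0x13000 with
 * PAGE_SIZE = 0x1000, the first two are physically contiguous and are
 * merged into segs[0] = { 0x10000, 0x2000 }, while the third starts a
 * new segment segs[1] = { 0x13000, 0x1000 }, so *rsegs ends up as 2.
 */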
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED |
			    (flags & BUS_DMA_COHERENT ? PMAP_NC : 0));
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);

	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (segs[i].ds_addr + off);
	}

	/* Page not found. */
	return (-1);
}