/*	$NetBSD: bus_dma.c,v 1.51 2008/04/28 20:23:13 martin Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#define _ARM32_BUS_DMA_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.51 2008/04/28 20:23:13 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/cpu.h>

#include <arm/cpufunc.h>
int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int);
struct arm32_dma_range *_bus_dma_inrange(struct arm32_dma_range *,
	    int, bus_addr_t);
/*
 * Check to see if the specified page is in an allowed DMA range.
 */
inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
/*
 * Common function to load the specified physical address into the
 * DMA map, coalescing segments and boundary checking as necessary.
 */
static int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t paddr, bus_size_t size)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	int nseg = map->dm_nsegs;
	bus_addr_t lastaddr = 0xdead; /* XXX gcc */
	bus_addr_t bmask = ~(map->_dm_boundary - 1);
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (nseg > 0)
		lastaddr = segs[nseg-1].ds_addr + segs[nseg-1].ds_len;
 again:
	sgsize = size;

	/* Make sure we're in an allowed DMA range. */
	if (t->_ranges != NULL) {
		/* XXX cache last result? */
		const struct arm32_dma_range * const dr =
		    _bus_dma_inrange(t->_ranges, t->_nranges, paddr);
		if (dr == NULL)
			return (EINVAL);

		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (paddr - dr->dr_sysbase) + dr->dr_busbase;
	} else
		curaddr = paddr;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (map->_dm_boundary > 0) {
		bus_addr_t baddr;	/* next boundary address */

		baddr = (curaddr + map->_dm_boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with the
	 * previous segment if possible.
	 */
	if (nseg > 0 && curaddr == lastaddr &&
	    segs[nseg-1].ds_len + sgsize <= map->dm_maxsegsz &&
	    (map->_dm_boundary == 0 ||
	     (segs[nseg-1].ds_addr & bmask) == (curaddr & bmask))) {
		/* coalesce */
		segs[nseg-1].ds_len += sgsize;
	} else if (nseg >= map->_dm_segcnt) {
		return (EFBIG);
	} else {
		/* new segment */
		segs[nseg].ds_addr = curaddr;
		segs[nseg].ds_len = sgsize;
		nseg++;
	}

	lastaddr = curaddr + sgsize;

	paddr += sgsize;
	size -= sgsize;
	if (size > 0)
		goto again;

	map->dm_nsegs = nseg;
	return (0);
}
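
/*
 * Worked example of the boundary clipping above (illustrative values,
 * not from the original file): with _dm_boundary = 0x10000 (64KB),
 * bmask = ~(0x10000 - 1) = 0xffff0000.  Loading 0x100 bytes at
 * curaddr = 0x0002fff0 gives
 *
 *	baddr  = (0x0002fff0 + 0x10000) & 0xffff0000 = 0x00030000
 *	sgsize = baddr - curaddr = 0x10
 *
 * so only 0x10 bytes go into the current segment; the remaining 0xf0
 * bytes are loaded on the next pass and start a new segment at the
 * 64KB boundary.
 */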
/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct arm32_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_vmspace = vmspace_kernel();
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
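
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * with a hypothetical softc "sc" might create a map for a 64KB transfer
 * split across at most 8 segments, with no boundary restriction, and
 * destroy it again on detach:
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, 65536, 8, 65536, 0,
 *	    BUS_DMA_WAITOK, &map);
 *	if (error)
 *		return error;
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */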
/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * Explicit unload.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_vmspace = NULL;

	free(map, M_DMAMAP);
}
/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int error;
	struct vmspace *vm;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->_dm_origbuf = buf;
		map->_dm_buftype = ARM32_BUFTYPE_LINEAR;
		map->_dm_vmspace = vm;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	int error;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Mbuf chains should almost never have coherent (i.e.
	 * un-cached) mappings, so clear that flag now.
	 */
	map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		int offset;
		int remainbytes;
		const struct vm_page * const *pgs;
		paddr_t paddr;
		int size;

		if (m->m_len == 0)
			continue;
		switch (m->m_flags & (M_EXT|M_CLUSTER|M_EXT_PAGES)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			paddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size);
			break;

		case M_EXT|M_EXT_PAGES:
			KASSERT(m->m_ext.ext_buf <= m->m_data);
			KASSERT(m->m_data <=
			    m->m_ext.ext_buf + m->m_ext.ext_size);

			offset = (vaddr_t)m->m_data -
			    trunc_page((vaddr_t)m->m_ext.ext_buf);
			remainbytes = m->m_len;

			/* skip uninteresting pages */
			pgs = (const struct vm_page * const *)
			    m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);

			offset &= PAGE_MASK;	/* offset in the first page */

			/* load each page */
			while (remainbytes > 0) {
				const struct vm_page *pg;

				size = MIN(remainbytes, PAGE_SIZE - offset);

				pg = *pgs++;
				KASSERT(pg);
				paddr = VM_PAGE_TO_PHYS(pg) + offset;

				error = _bus_dmamap_load_paddr(t, map,
				    paddr, size);
				if (error)
					break;

				offset = 0;
				remainbytes -= size;
			}
			break;

		case 0:
			paddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size);
			break;

		default:
			error = _bus_dmamap_load_buffer(t, map, m->m_data,
			    m->m_len, vmspace_kernel(), flags);
		}
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->_dm_origbuf = m0;
		map->_dm_buftype = ARM32_BUFTYPE_MBUF;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
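
/*
 * Illustrative sketch (not part of the original file): a network driver
 * transmit path typically pairs this with a PREWRITE sync before handing
 * the segments to the hardware; "sc", "map" and the descriptor setup
 * helper are hypothetical:
 *
 *	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT) != 0)
 *		return ENOBUFS;
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREWRITE);
 *	for (i = 0; i < map->dm_nsegs; i++)
 *		mydev_set_descriptor(sc, map->dm_segs[i].ds_addr,
 *		    map->dm_segs[i].ds_len);
 */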
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	int i, error;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags);

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->_dm_origbuf = uio;
		map->_dm_buftype = ARM32_BUFTYPE_UIO;
		map->_dm_vmspace = uio->uio_vmspace;
	}
	return (error);
}
/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_vmspace = NULL;
}
static void
_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	vaddr_t addr = (vaddr_t) map->_dm_origbuf;

	addr += offset;

	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		cpu_dcache_wbinv_range(addr, len);
		break;

	case BUS_DMASYNC_PREREAD:
		if (((addr | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range(addr, len);
		else
			cpu_dcache_wbinv_range(addr, len);
		break;

	case BUS_DMASYNC_PREWRITE:
		cpu_dcache_wb_range(addr, len);
		break;
	}
}
static void
_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct mbuf *m, *m0 = map->_dm_origbuf;
	bus_size_t minlen, moff;
	vaddr_t maddr;

	for (moff = offset, m = m0; m != NULL && len != 0; m = m->m_next) {
		/* Find the beginning mbuf. */
		if (moff >= m->m_len) {
			moff -= m->m_len;
			continue;
		}

		/*
		 * Now at the first mbuf to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = m->m_len - moff;
		if (len < minlen)
			minlen = len;

		maddr = mtod(m, vaddr_t);
		maddr += moff;

		/*
		 * We can save a lot of work here if we know the mapping
		 * is read-only at the MMU:
		 *
		 * If a mapping is read-only, no dirty cache blocks will
		 * exist for it.  If a writable mapping was made read-only,
		 * we know any dirty cache lines for the range will have
		 * been cleaned for us already.  Therefore, if the upper
		 * layer can tell us we have a read-only mapping, we can
		 * skip all cache cleaning.
		 *
		 * NOTE: This only works if we know the pmap cleans pages
		 * before making a read-write -> read-only transition.  If
		 * this ever becomes non-true (e.g. Physically Indexed
		 * cache), this will have to be revisited.
		 */
		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			if (!M_ROMAP(m)) {
				cpu_dcache_wbinv_range(maddr, minlen);
				break;
			}
			/* else FALLTHROUGH */

		case BUS_DMASYNC_PREREAD:
			if (((maddr | minlen) & arm_dcache_align_mask) == 0)
				cpu_dcache_inv_range(maddr, minlen);
			else
				cpu_dcache_wbinv_range(maddr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			if (!M_ROMAP(m))
				cpu_dcache_wb_range(maddr, minlen);
			break;
		}
		moff = 0;
		len -= minlen;
	}
}
static void
_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct uio *uio = map->_dm_origbuf;
	struct iovec *iov;
	bus_size_t minlen, ioff;
	vaddr_t addr;

	for (iov = uio->uio_iov, ioff = offset; len != 0; iov++) {
		/* Find the beginning iovec. */
		if (ioff >= iov->iov_len) {
			ioff -= iov->iov_len;
			continue;
		}

		/*
		 * Now at the first iovec to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = iov->iov_len - ioff;
		if (len < minlen)
			minlen = len;

		addr = (vaddr_t) iov->iov_base;
		addr += ioff;

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			cpu_dcache_wbinv_range(addr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
			if (((addr | minlen) & arm_dcache_align_mask) == 0)
				cpu_dcache_inv_range(addr, minlen);
			else
				cpu_dcache_wbinv_range(addr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			cpu_dcache_wb_range(addr, minlen);
			break;
		}
		ioff = 0;
		len -= minlen;
	}
}
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * This version works for the Virtually Indexed Virtually Tagged
 * cache found on 32-bit ARM processors.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches.  We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * For a virtually-indexed write-back cache, we need
	 * to do the following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this
	 *	here in case a write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if
	 *	we are doing a PREREAD|PREWRITE, we can collapse
	 *	the whole thing into a single Wb-Inv.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/* Skip cache frobbing if mapping was COHERENT. */
	if (map->_dm_flags & ARM32_DMAMAP_COHERENT) {
		/* Drain the write buffer. */
		cpu_drain_writebuf();
		return;
	}

	/*
	 * If the mapping belongs to a non-kernel vmspace, and the
	 * vmspace has not been active since the last time a full
	 * cache flush was performed, we don't need to do anything.
	 */
	if (__predict_false(!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) &&
	    vm_map_pmap(&map->_dm_vmspace->vm_map)->pm_cstate.cs_cache_d == 0))
		return;

	switch (map->_dm_buftype) {
	case ARM32_BUFTYPE_LINEAR:
		_bus_dmamap_sync_linear(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_MBUF:
		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_UIO:
		_bus_dmamap_sync_uio(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: ARM32_BUFTYPE_RAW");
		break;

	case ARM32_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: ARM32_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", map->_dm_buftype);
		panic("_bus_dmamap_sync");
	}

	/* Drain the write buffer. */
	cpu_drain_writebuf();
}
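
/*
 * Illustrative ordering sketch from the driver's point of view (not part
 * of the original file); "sc", "map", "buf" and "len" are hypothetical:
 *
 *	bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len,
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *	... program the device with map->dm_segs[] and start the DMA ...
 *
 *	... later, in the completion interrupt ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len,
 *	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 */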
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern paddr_t physical_start;
extern paddr_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct arm32_dma_range *dr;
	int error, i;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags);
#endif

	if ((dr = t->_ranges) != NULL) {
		error = ENOMEM;
		for (i = 0; i < t->_nranges; i++, dr++) {
			if (dr->dr_len == 0)
				continue;
			error = _bus_dmamem_alloc_range(t, size, alignment,
			    boundary, segs, nsegs, rsegs, flags,
			    trunc_page(dr->dr_sysbase),
			    trunc_page(dr->dr_sysbase + dr->dr_len));
			if (error == 0)
				break;
		}
	} else {
		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
		    segs, nsegs, rsegs, flags, trunc_page(physical_start),
		    trunc_page(physical_end));
	}

#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif

	return (error);
}
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}
	uvm_pglistfree(&mlist);
}
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep /*, pte*/;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
	    (unsigned long)size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (arm_btop((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}
/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	pd_entry_t *pde;
	pt_entry_t pte;
	int error;
	pmap_t pmap;
	pt_entry_t *ptep;

#ifdef DEBUG_DMA
	printf("_bus_dmamem_load_buffer(buf=%p, len=%lx, flags=%d)\n",
	    buf, buflen, flags);
#endif	/* DEBUG_DMA */

	pmap = vm_map_pmap(&vm->vm_map);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->_dm_flags &=
					    ~ARM32_DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KDASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV);
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->_dm_flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->_dm_flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				}
			}
		} else {
			(void) pmap_extract(pmap, vaddr, &curaddr);
			map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		error = _bus_dmamap_load_paddr(t, map, curaddr, sgsize);
		if (error)
			return (error);

		vaddr += sgsize;
		buflen -= sgsize;
	}

	return (0);
}
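
/*
 * Worked example of the per-page segmentation above (illustrative values,
 * not from the original file): with PAGE_SIZE = 0x1000, loading a buffer
 * at vaddr 0x00201003 with buflen 0x2000 produces three calls to
 * _bus_dmamap_load_paddr():
 *
 *	pass 1: sgsize = 0x1000 - 0x003 = 0xffd	(up to the page end)
 *	pass 2: sgsize = 0x1000			(one full page)
 *	pass 3: sgsize = 0x003			(the remainder)
 *
 * Physically contiguous, boundary-compatible chunks are then coalesced
 * back into larger segments by _bus_dmamap_load_paddr().
 */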
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
/*
 * Check if a memory region intersects with a DMA range, and return the
 * page-rounded intersection if it does.
 */
int
arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
    paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
{
	struct arm32_dma_range *dr;
	int i;

	if (ranges == NULL)
		return (0);

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (dr->dr_sysbase <= pa &&
		    pa < (dr->dr_sysbase + dr->dr_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    dr->dr_sysbase + dr->dr_len) - pa);
			return (1);
		}
		if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(dr->dr_sysbase);
			*sizep = round_page(min((pa + size) - dr->dr_sysbase,
			    dr->dr_len));
			return (1);
		}
	}

	/* No intersection found. */
	return (0);
}
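
/*
 * Worked example (illustrative values, not from the original file): for a
 * range with dr_sysbase = 0x20000000 and dr_len = 0x08000000, the region
 * pa = 0x1fffe000, size = 0x4000 starts below the range but overlaps its
 * beginning, so the second case applies:
 *
 *	*pap   = trunc_page(0x20000000)                  = 0x20000000
 *	*sizep = round_page(min(0x20002000 - 0x20000000,
 *	                        0x08000000))             = 0x2000
 *
 * and the function returns 1.
 */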