/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.74.2.4 2006/10/21 16:26:53 hrs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>
# define MAX_BPAGES 8192
# define MAX_BPAGES 512

#define malloc(a, b, c) kernel_malloc(a, b, c)
#define free(a, b) kernel_free(a, b)
#define contigmalloc(a, b, c, d, e, f, g) kernel_contigmalloc(a, b, c, d, e, f, g)
#define contigfree(a, b, c) kernel_contigfree(a, b, c)

void busdma_swi(void);
void init_bounce_pages(void);
void uninit_bounce_pages(void);
	bus_dma_filter_t	*filter;
	bus_dma_lock_t		*lockfunc;
	bus_dma_segment_t	*segments;
	struct bounce_zone	*bounce_zone;
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
int busdma_swi_pending;
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	bus_size_t	alignment;
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");
	struct bp_list	bpages;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	STAILQ_ENTRY(bus_dmamap) links;
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, then call the filter callback
 * to check for a match; if there is no filter callback, then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);

	return (retval);
}
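
/*
 * Illustrative sketch, not part of the original file: a driver-supplied
 * bus_dma_filter_t that run_filter() above would invoke.  Returning non-zero
 * tells busdma that 'paddr' is unusable and a bounce page is required.  The
 * name foo_dma_filter and the excluded window are hypothetical.
 *
 *	static int
 *	foo_dma_filter(void *arg, bus_addr_t paddr)
 *	{
 *		return (paddr >= 0x00f00000 && paddr < 0x01000000);
 *	}
 *
 * The filter and its argument are handed to bus_dma_tag_create() as the
 * filter/filterarg pair; a simple "everything below X" limit is better
 * expressed through 'lowaddr' alone.
 */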
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
	dmtx = (struct mtx *)arg;
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
dflt_lock(void *arg, bus_dma_lock_op_t op)
	panic("driver error: busdma dflt_lock called");
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
	bus_dma_tag_t newtag;
	int error = 0;
	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		return (EINVAL);

	/* Return a NULL tag on failure */
	*dmat = NULL;
	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}
	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;
	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;
	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
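
/*
 * Illustrative sketch, not part of the original file: a typical driver-side
 * call, creating a tag for a single page-sized, page-aligned buffer that
 * must lie below 4GB.  The argument order is (parent, alignment, boundary,
 * lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags,
 * lockfunc, lockfuncarg, dmat).  The softc field foo_dmat is hypothetical;
 * passing busdma_lock_mutex with &Giant is the conventional choice for a
 * driver that does not supply its own lock (see busdma_lock_mutex above).
 *
 *	error = bus_dma_tag_create(NULL,
 *	    PAGE_SIZE, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL,
 *	    PAGE_SIZE, 1, PAGE_SIZE,
 *	    0,
 *	    busdma_lock_mutex, &Giant,
 *	    &sc->foo_dmat);
 */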
bus_dma_tag_destroy(bus_dma_tag_t dmat)
	bus_dma_tag_t dmat_copy;

	if (dmat->map_count != 0) {

	while (dmat != NULL) {
		bus_dma_tag_t parent;

		parent = dmat->parent;
		atomic_subtract_int(&dmat->ref_count, 1);
		if (dmat->ref_count == 0) {
			if (dmat->segments != NULL)
				free(dmat->segments, M_DEVBUF);
			free(dmat, M_DEVBUF);
			/*
			 * Last reference count, so
			 * release our reference
			 * count on our parent.
			 */

	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}
	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		struct bounce_zone *bz;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));
		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}

	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = &nobounce_dmamap;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}
	/*
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL && map != &nobounce_dmamap)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
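
/*
 * Illustrative sketch, not part of the original file: pairing the two
 * routines above.  foo_vaddr is a void * and foo_map a bus_dmamap_t in a
 * hypothetical softc; the tag is assumed to describe a single page.
 *
 *	if (bus_dmamem_alloc(sc->foo_dmat, &sc->foo_vaddr,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->foo_map) != 0)
 *		return (ENOMEM);
 *
 *	bus_dmamem_free(sc->foo_dmat, sc->foo_vaddr, sc->foo_map);
 */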
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	if (map == NULL)
		map = &nobounce_dmamap;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)
	 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
		vm_offset_t vendaddr;

		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}
	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = (bus_addr_t)pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
	bus_addr_t	lastaddr = 0;
	int		error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, dmat->segments, &nsegs, 1);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
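
/*
 * Illustrative sketch, not part of the original file: the usual
 * load/callback pairing for the memory allocated above.  The callback runs
 * once the mapping is complete (possibly deferred through busdma_swi()
 * unless BUS_DMA_NOWAIT is passed).  All foo_* names are hypothetical.
 *
 *	static void
 *	foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error != 0)
 *			return;
 *		*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->foo_dmat, sc->foo_map, sc->foo_vaddr,
 *	    PAGE_SIZE, foo_load_cb, &sc->foo_busaddr, BUS_DMA_NOWAIT);
 */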
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
	flags |= BUS_DMA_NOWAIT;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					NULL, flags, &lastaddr,
					dmat->segments, &nsegs, first);
			first = 0;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs+1, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
	flags |= BUS_DMA_NOWAIT;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					NULL, flags, &lastaddr,
					segs, nsegs, first);
			first = 0;
		}
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
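
/*
 * Illustrative sketch, not part of the original file: a network driver
 * filling a transmit descriptor from an mbuf chain with the scatter/gather
 * variant above.  FOO_MAXSEGS and the foo_* names are hypothetical; EFBIG
 * is the usual cue to m_defrag() the chain and retry.
 *
 *	bus_dma_segment_t segs[FOO_MAXSEGS];
 *	int nsegs, error;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->foo_tx_dmat, txbuf->foo_map, m,
 *	    segs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error != 0)
 *		return (error);
 */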
/*
 * Like _bus_dmamap_load(), but for uios.
 */
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
	int nsegs, error, first, i;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat, map,
				addr, minlen, pmap, flags, &lastaddr,
				dmat->segments, &nsegs, first);
		first = 0;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
/*
 * Release the mapping held by map.
 */
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		dmat->bounce_zone->total_bounced++;
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, op, dmat, dmat->flags);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
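
/*
 * Illustrative sketch, not part of the original file: how a driver brackets
 * a bidirectional transfer so the bounce copies above happen at the right
 * time.  PREWRITE (CPU -> device) must precede starting the hardware;
 * POSTREAD (device -> CPU) must follow completion, before the CPU inspects
 * the data.  bus_dmamap_sync() is the machine-independent entry point that
 * ends up in _bus_dmamap_sync() above; the foo_* helpers are hypothetical.
 *
 *	bus_dmamap_sync(sc->foo_dmat, sc->foo_map, BUS_DMASYNC_PREWRITE);
 *	foo_start_transfer(sc);
 *	foo_wait_for_completion(sc);
 *	bus_dmamap_sync(sc->foo_dmat, sc->foo_map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->foo_dmat, sc->foo_map);
 */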
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);

/* Haiku extension */
uninit_bounce_pages()
	mtx_destroy(&bounce_lock);
static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}
alloc_bounce_zone(bus_dma_tag_t dmat)
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}
	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%llx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;

	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		mtx_unlock(&bounce_lock);
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
void
busdma_swi(void)
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);