/*	$NetBSD: isadma_bounce.c,v 1.10 2007/03/06 13:54:45 he Exp $	*/
/*	NetBSD: isadma_bounce.c,v 1.2 2000/06/01 05:49:36 thorpej Exp	*/

/*-
 * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: isadma_bounce.c,v 1.10 2007/03/06 13:54:45 he Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARC_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>
extern paddr_t avail_end;		/* from pmap.c */
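
/*
 * ISA devices can only address the low 16MB of physical memory.
 * ISA_DMA_BOUNCE_THRESHOLD (assumed here to be defined as 16MB in the
 * machine-dependent ISA headers) marks that limit; any map that may
 * touch memory above it has to be bounced through buffers below it.
 */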
/*
 * Cookie used by bouncing ISA DMA.  A pointer to one of these is stashed
 * in the DMA map.
 */
struct isadma_bounce_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[1]; /* array of bounce buffer
					       physical memory segments */
};
/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */
/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4
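
/*
 * id_buftype tells isadma_bounce_dmamap_sync() how to copy between the
 * original buffer and the bounce buffer: a plain memcpy() for LINEAR,
 * an mbuf-chain walk for MBUF.  UIO and RAW transfers are not
 * implemented by this code and panic if they are ever synced.
 */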
int	isadma_bounce_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	isadma_bounce_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	isadma_bounce_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	isadma_bounce_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	isadma_bounce_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	isadma_bounce_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	isadma_bounce_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);
int	isadma_bounce_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

static int	isadma_bounce_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
		    bus_size_t, int);
static void	isadma_bounce_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);
void
isadma_bounce_tag_init(bus_dma_tag_t t)
{
	/*
	 * Initialize the DMA tag used for ISA DMA.
	 */

	_bus_dma_tag_init(t);

	t->_dmamap_create = isadma_bounce_dmamap_create;
	t->_dmamap_destroy = isadma_bounce_dmamap_destroy;
	t->_dmamap_load = isadma_bounce_dmamap_load;
	t->_dmamap_load_mbuf = isadma_bounce_dmamap_load_mbuf;
	t->_dmamap_load_uio = isadma_bounce_dmamap_load_uio;
	t->_dmamap_load_raw = isadma_bounce_dmamap_load_raw;
	t->_dmamap_unload = isadma_bounce_dmamap_unload;
	t->_dmamap_sync = isadma_bounce_dmamap_sync;
	t->_dmamem_alloc = isadma_bounce_dmamem_alloc;
}
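
/*
 * Note that only the methods above are overridden; the rest of the tag
 * (e.g. _dmamem_free, _dmamem_map) presumably keeps the generic defaults
 * installed by _bus_dma_tag_init().
 */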
/*
 * Create an ISA DMA map.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isadma_bounce_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;
	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return error;

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(*cookie);
	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) *
		    (map->_dm_segcnt - 1));
	}
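
	/*
	 * Worked example (illustrative numbers only): a 64KB map with
	 * 4KB pages can span (65536 / 4096) + 1 = 17 segments, since an
	 * unaligned buffer straddles one extra page.  A caller limited
	 * to fewer segments than that must bounce into a physically
	 * contiguous buffer even on a machine with less than 16MB.
	 */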
	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct isadma_bounce_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;
	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return error;
}
/*
 * Destroy an ISA DMA map.
 */
void
isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		isadma_bounce_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}
/*
 * Load an ISA DMA map with a linear buffer.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return error;
	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return error;
	}
	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return error;
	}
	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return 0;
}
/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif
	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return error;
	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return error;
	}
	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return error;
	}
	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return 0;
}
/*
 * Like isadma_bounce_dmamap_load(), but for uios.
 */
int
isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("isadma_bounce_dmamap_load_uio: not implemented");
}
/*
 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("isadma_bounce_dmamap_load_raw: not implemented");
}
/*
 * Unload an ISA DMA map.
 */
void
isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		isadma_bounce_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}
/*
 * Synchronize an ISA DMA map.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	void (*sync)(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int);

	sync = _bus_dmamap_sync;
	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");
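
	/*
	 * (PRE ops prepare a transfer that has not happened yet; POST
	 * ops clean up after one that has.  A single call cannot
	 * coherently do both, hence the panic above.)
	 */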
#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif
	/*
	 * If we're not bouncing, just drain the write buffer
	 * and flush cache.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		((*sync)(t, map, offset, len, ops));
		return;
	}
	/*
	 * This should be needed in BUS_DMASYNC_POSTREAD case only,
	 * if _mips3_bus_dmamap_sync() used "Hit_Invalidate on POSTREAD",
	 * rather than "Hit_Write_Back_Invalidate on PREREAD".
	 */
	if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTREAD))
		((*sync)(t, map, offset, len, ops));
	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}
		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}
		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }
	case ID_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("isadma_bounce_dmamap_sync");
	}
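
	/*
	 * The data just copied into the bounce buffer on a PREWRITE must
	 * be written back from the CPU cache before the device reads it;
	 * that is presumably why the MD sync hook runs once more here.
	 */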
	if (ops & BUS_DMASYNC_PREWRITE)
		((*sync)(t, map, offset, len, ops));
}
/*
 * Allocate memory safe for ISA DMA.
 */
int
isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	paddr_t high;

	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high);
}
/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/
static int
isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = isadma_bounce_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (void **)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else
		cookie->id_flags |= ID_HAS_BOUNCE;

	return error;
}
static void
isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);

	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}