/* $NetBSD: isadma_bounce.c,v 1.8 2008/04/28 20:23:11 martin Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
33 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
35 __KERNEL_RCSID(0, "$NetBSD: isadma_bounce.c,v 1.8 2008/04/28 20:23:11 martin Exp $");
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/syslog.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
45 #define _ALPHA_BUS_DMA_PRIVATE
46 #include <machine/bus.h>
48 #include <dev/isa/isareg.h>
49 #include <dev/isa/isavar.h>
51 #include <uvm/uvm_extern.h>
53 extern paddr_t avail_end
;
56 * Cookie used by bouncing ISA DMA. A pointer to one of these is stashed
59 struct isadma_bounce_cookie
{
60 int id_flags
; /* flags; see below */
63 * Information about the original buffer used during
64 * DMA map syncs. Note that origbuflen is only used
65 * for ID_BUFTYPE_LINEAR.
67 void *id_origbuf
; /* pointer to orig buffer if
69 bus_size_t id_origbuflen
; /* ...and size */
70 int id_buftype
; /* type of buffer */
72 void *id_bouncebuf
; /* pointer to the bounce buffer */
73 bus_size_t id_bouncebuflen
; /* ...and size */
74 int id_nbouncesegs
; /* number of valid bounce segs */
75 bus_dma_segment_t id_bouncesegs
[1]; /* array of bounce buffer
76 physical memory segments */
80 #define ID_MIGHT_NEED_BOUNCE 0x01 /* map could need bounce buffers */
81 #define ID_HAS_BOUNCE 0x02 /* map currently has bounce buffers */
82 #define ID_IS_BOUNCING 0x04 /* map is bouncing current xfer */
85 #define ID_BUFTYPE_INVALID 0
86 #define ID_BUFTYPE_LINEAR 1
87 #define ID_BUFTYPE_MBUF 2
88 #define ID_BUFTYPE_UIO 3
89 #define ID_BUFTYPE_RAW 4
91 int isadma_bounce_alloc_bouncebuf(bus_dma_tag_t
, bus_dmamap_t
,
93 void isadma_bounce_free_bouncebuf(bus_dma_tag_t
, bus_dmamap_t
);
96 * Create an ISA DMA map.
99 isadma_bounce_dmamap_create(bus_dma_tag_t t
, bus_size_t size
, int nsegments
,
100 bus_size_t maxsegsz
, bus_size_t boundary
, int flags
, bus_dmamap_t
*dmamp
)
102 struct isadma_bounce_cookie
*cookie
;
104 int error
, cookieflags
;
108 /* Call common function to create the basic map. */
109 error
= _bus_dmamap_create(t
, size
, nsegments
, maxsegsz
, boundary
,
115 map
->_dm_cookie
= NULL
;
117 cookiesize
= sizeof(*cookie
);
120 * ISA only has 24-bits of address space. This means
121 * we can't DMA to pages over 16M. In order to DMA to
122 * arbitrary buffers, we use "bounce buffers" - pages
123 * in memory below the 16M boundary. On DMA reads,
124 * DMA happens to the bounce buffers, and is copied into
125 * the caller's buffer. On writes, data is copied into
126 * but bounce buffer, and the DMA happens from those
127 * pages. To software using the DMA mapping interface,
128 * this looks simply like a data cache.
130 * If we have more than 16M of RAM in the system, we may
131 * need bounce buffers. We check and remember that here.
133 * ...or, there is an opposite case. The most segments
134 * a transfer will require is (maxxfer / PAGE_SIZE) + 1. If
135 * the caller can't handle that many segments (e.g. the
136 * ISA DMA controller), we may have to bounce it as well.
139 if (avail_end
> (t
->_wbase
+ t
->_wsize
) ||
140 ((map
->_dm_size
/ PAGE_SIZE
) + 1) > map
->_dm_segcnt
) {
141 cookieflags
|= ID_MIGHT_NEED_BOUNCE
;
142 cookiesize
+= (sizeof(bus_dma_segment_t
) *
143 (map
->_dm_segcnt
- 1));
147 * Allocate our cookie.
149 if ((cookiestore
= malloc(cookiesize
, M_DMAMAP
,
150 (flags
& BUS_DMA_NOWAIT
) ? M_NOWAIT
: M_WAITOK
)) == NULL
) {
154 memset(cookiestore
, 0, cookiesize
);
155 cookie
= (struct isadma_bounce_cookie
*)cookiestore
;
156 cookie
->id_flags
= cookieflags
;
157 map
->_dm_cookie
= cookie
;
159 if (cookieflags
& ID_MIGHT_NEED_BOUNCE
) {
161 * Allocate the bounce pages now if the caller
162 * wishes us to do so.
164 if ((flags
& BUS_DMA_ALLOCNOW
) == 0)
167 error
= isadma_bounce_alloc_bouncebuf(t
, map
, size
, flags
);
172 if (map
->_dm_cookie
!= NULL
)
173 free(map
->_dm_cookie
, M_DMAMAP
);
174 _bus_dmamap_destroy(t
, map
);
180 * Destroy an ISA DMA map.
183 isadma_bounce_dmamap_destroy(bus_dma_tag_t t
, bus_dmamap_t map
)
185 struct isadma_bounce_cookie
*cookie
= map
->_dm_cookie
;
188 * Free any bounce pages this map might hold.
190 if (cookie
->id_flags
& ID_HAS_BOUNCE
)
191 isadma_bounce_free_bouncebuf(t
, map
);
193 free(cookie
, M_DMAMAP
);
194 _bus_dmamap_destroy(t
, map
);
198 * Load an ISA DMA map with a linear buffer.
201 isadma_bounce_dmamap_load(bus_dma_tag_t t
, bus_dmamap_t map
, void *buf
,
202 size_t buflen
, struct proc
*p
, int flags
)
204 struct isadma_bounce_cookie
*cookie
= map
->_dm_cookie
;
208 * Make sure that on error condition we return "no valid mappings."
214 * Try to load the map the normal way. If this errors out,
215 * and we can bounce, we will.
217 error
= _bus_dmamap_load_direct(t
, map
, buf
, buflen
, p
, flags
);
219 (error
!= 0 && (cookie
->id_flags
& ID_MIGHT_NEED_BOUNCE
) == 0))
223 * First attempt failed; bounce it.
227 * Allocate bounce pages, if necessary.
229 if ((cookie
->id_flags
& ID_HAS_BOUNCE
) == 0) {
230 error
= isadma_bounce_alloc_bouncebuf(t
, map
, buflen
, flags
);
236 * Cache a pointer to the caller's buffer and load the DMA map
237 * with the bounce buffer.
239 cookie
->id_origbuf
= buf
;
240 cookie
->id_origbuflen
= buflen
;
241 cookie
->id_buftype
= ID_BUFTYPE_LINEAR
;
242 error
= _bus_dmamap_load_direct(t
, map
, cookie
->id_bouncebuf
, buflen
,
246 * Free the bounce pages, unless our resources
247 * are reserved for our exclusive use.
249 if ((map
->_dm_flags
& BUS_DMA_ALLOCNOW
) == 0)
250 isadma_bounce_free_bouncebuf(t
, map
);
254 /* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
255 cookie
->id_flags
|= ID_IS_BOUNCING
;
261 * Like isadma_bounce_dmamap_load(), but for mbufs.
264 isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t
, bus_dmamap_t map
,
265 struct mbuf
*m0
, int flags
)
267 struct isadma_bounce_cookie
*cookie
= map
->_dm_cookie
;
271 * Make sure on error condition we return "no valid mappings."
277 if ((m0
->m_flags
& M_PKTHDR
) == 0)
278 panic("isadma_bounce_dmamap_load_mbuf: no packet header");
281 if (m0
->m_pkthdr
.len
> map
->_dm_size
)
285 * Try to load the map the normal way. If this errors out,
286 * and we can bounce, we will.
288 error
= _bus_dmamap_load_mbuf_direct(t
, map
, m0
, flags
);
290 (error
!= 0 && (cookie
->id_flags
& ID_MIGHT_NEED_BOUNCE
) == 0))
294 * First attempt failed; bounce it.
298 * Allocate bounce pages, if necessary.
300 if ((cookie
->id_flags
& ID_HAS_BOUNCE
) == 0) {
301 error
= isadma_bounce_alloc_bouncebuf(t
, map
, m0
->m_pkthdr
.len
,
308 * Cache a pointer to the caller's buffer and load the DMA map
309 * with the bounce buffer.
311 cookie
->id_origbuf
= m0
;
312 cookie
->id_origbuflen
= m0
->m_pkthdr
.len
; /* not really used */
313 cookie
->id_buftype
= ID_BUFTYPE_MBUF
;
314 error
= _bus_dmamap_load_direct(t
, map
, cookie
->id_bouncebuf
,
315 m0
->m_pkthdr
.len
, NULL
, flags
);
318 * Free the bounce pages, unless our resources
319 * are reserved for our exclusive use.
321 if ((map
->_dm_flags
& BUS_DMA_ALLOCNOW
) == 0)
322 isadma_bounce_free_bouncebuf(t
, map
);
326 /* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
327 cookie
->id_flags
|= ID_IS_BOUNCING
;
333 * Like isadma_bounce_dmamap_load(), but for uios.
336 isadma_bounce_dmamap_load_uio(bus_dma_tag_t t
, bus_dmamap_t map
,
337 struct uio
*uio
, int flags
)
340 panic("isadma_bounce_dmamap_load_uio: not implemented");
344 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
345 * bus_dmamem_alloc().
348 isadma_bounce_dmamap_load_raw(bus_dma_tag_t t
, bus_dmamap_t map
,
349 bus_dma_segment_t
*segs
, int nsegs
, bus_size_t size
, int flags
)
352 panic("isadma_bounce_dmamap_load_raw: not implemented");
356 * Unload an ISA DMA map.
359 isadma_bounce_dmamap_unload(bus_dma_tag_t t
, bus_dmamap_t map
)
361 struct isadma_bounce_cookie
*cookie
= map
->_dm_cookie
;
364 * If we have bounce pages, free them, unless they're
365 * reserved for our exclusive use.
367 if ((cookie
->id_flags
& ID_HAS_BOUNCE
) &&
368 (map
->_dm_flags
& BUS_DMA_ALLOCNOW
) == 0)
369 isadma_bounce_free_bouncebuf(t
, map
);
371 cookie
->id_flags
&= ~ID_IS_BOUNCING
;
372 cookie
->id_buftype
= ID_BUFTYPE_INVALID
;
375 * Do the generic bits of the unload.
377 _bus_dmamap_unload(t
, map
);
381 * Synchronize an ISA DMA map.
384 isadma_bounce_dmamap_sync(bus_dma_tag_t t
, bus_dmamap_t map
, bus_addr_t offset
,
385 bus_size_t len
, int ops
)
387 struct isadma_bounce_cookie
*cookie
= map
->_dm_cookie
;
390 * Mixing PRE and POST operations is not allowed.
392 if ((ops
& (BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
)) != 0 &&
393 (ops
& (BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
)) != 0)
394 panic("isadma_bounce_dmamap_sync: mix PRE and POST");
397 if ((ops
& (BUS_DMASYNC_PREWRITE
|BUS_DMASYNC_POSTREAD
)) != 0) {
398 if (offset
>= map
->dm_mapsize
)
399 panic("isadma_bounce_dmamap_sync: bad offset");
400 if (len
== 0 || (offset
+ len
) > map
->dm_mapsize
)
401 panic("isadma_bounce_dmamap_sync: bad length");
406 * If we're not bouncing, just drain the write buffer
409 if ((cookie
->id_flags
& ID_IS_BOUNCING
) == 0) {
414 switch (cookie
->id_buftype
) {
415 case ID_BUFTYPE_LINEAR
:
417 * Nothing to do for pre-read.
420 if (ops
& BUS_DMASYNC_PREWRITE
) {
422 * Copy the caller's buffer to the bounce buffer.
424 memcpy((char *)cookie
->id_bouncebuf
+ offset
,
425 (char *)cookie
->id_origbuf
+ offset
, len
);
428 if (ops
& BUS_DMASYNC_POSTREAD
) {
430 * Copy the bounce buffer to the caller's buffer.
432 memcpy((char *)cookie
->id_origbuf
+ offset
,
433 (char *)cookie
->id_bouncebuf
+ offset
, len
);
437 * Nothing to do for post-write.
441 case ID_BUFTYPE_MBUF
:
443 struct mbuf
*m
, *m0
= cookie
->id_origbuf
;
444 bus_size_t minlen
, moff
;
447 * Nothing to do for pre-read.
450 if (ops
& BUS_DMASYNC_PREWRITE
) {
452 * Copy the caller's buffer to the bounce buffer.
454 m_copydata(m0
, offset
, len
,
455 (char *)cookie
->id_bouncebuf
+ offset
);
458 if (ops
& BUS_DMASYNC_POSTREAD
) {
460 * Copy the bounce buffer to the caller's buffer.
462 for (moff
= offset
, m
= m0
; m
!= NULL
&& len
!= 0;
464 /* Find the beginning mbuf. */
465 if (moff
>= m
->m_len
) {
471 * Now at the first mbuf to sync; nail
472 * each one until we have exhausted the
475 minlen
= len
< m
->m_len
- moff
?
476 len
: m
->m_len
- moff
;
478 memcpy(mtod(m
, char *) + moff
,
479 (char *)cookie
->id_bouncebuf
+ offset
,
489 * Nothing to do for post-write.
495 panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
499 panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
502 case ID_BUFTYPE_INVALID
:
503 panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
507 printf("unknown buffer type %d\n", cookie
->id_buftype
);
508 panic("isadma_bounce_dmamap_sync");
511 /* Drain the write buffer. */
516 * Allocate memory safe for ISA DMA.
519 isadma_bounce_dmamem_alloc(bus_dma_tag_t t
, bus_size_t size
,
520 bus_size_t alignment
, bus_size_t boundary
, bus_dma_segment_t
*segs
,
521 int nsegs
, int *rsegs
, int flags
)
525 if (avail_end
> ISA_DMA_BOUNCE_THRESHOLD
)
526 high
= trunc_page(ISA_DMA_BOUNCE_THRESHOLD
);
528 high
= trunc_page(avail_end
);
530 return (_bus_dmamem_alloc_range(t
, size
, alignment
, boundary
,
531 segs
, nsegs
, rsegs
, flags
, 0, high
));
534 /**********************************************************************
535 * ISA DMA utility functions
536 **********************************************************************/
539 isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t
, bus_dmamap_t map
,
540 bus_size_t size
, int flags
)
542 struct isadma_bounce_cookie
*cookie
= map
->_dm_cookie
;
545 cookie
->id_bouncebuflen
= round_page(size
);
546 error
= isadma_bounce_dmamem_alloc(t
, cookie
->id_bouncebuflen
,
547 PAGE_SIZE
, map
->_dm_boundary
, cookie
->id_bouncesegs
,
548 map
->_dm_segcnt
, &cookie
->id_nbouncesegs
, flags
);
551 error
= _bus_dmamem_map(t
, cookie
->id_bouncesegs
,
552 cookie
->id_nbouncesegs
, cookie
->id_bouncebuflen
,
553 (void **)&cookie
->id_bouncebuf
, flags
);
557 _bus_dmamem_free(t
, cookie
->id_bouncesegs
,
558 cookie
->id_nbouncesegs
);
559 cookie
->id_bouncebuflen
= 0;
560 cookie
->id_nbouncesegs
= 0;
562 cookie
->id_flags
|= ID_HAS_BOUNCE
;
568 isadma_bounce_free_bouncebuf(bus_dma_tag_t t
, bus_dmamap_t map
)
570 struct isadma_bounce_cookie
*cookie
= map
->_dm_cookie
;
572 _bus_dmamem_unmap(t
, cookie
->id_bouncebuf
,
573 cookie
->id_bouncebuflen
);
574 _bus_dmamem_free(t
, cookie
->id_bouncesegs
,
575 cookie
->id_nbouncesegs
);
576 cookie
->id_bouncebuflen
= 0;
577 cookie
->id_nbouncesegs
= 0;
578 cookie
->id_flags
&= ~ID_HAS_BOUNCE
;