/*	$NetBSD: isadma_machdep.c,v 1.12 2009/03/14 21:04:05 dsl Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isadma_machdep.c,v 1.12 2009/03/14 21:04:05 dsl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>
/*
 * ISA has a 24-bit address limitation, so at most it has a 16M
 * DMA range.  However, some platforms have a more limited range,
 * e.g. the Shark NC.  On these systems, we are provided with
 * a set of DMA ranges.  The pmap module is aware of these ranges
 * and places DMA-safe memory for them onto an alternate free list
 * so that they are protected from being used to service page faults,
 * etc. (unless we've run out of memory elsewhere).
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)
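/*
 * (16M is the highest physical address a 24-bit ISA DMA cycle can
 * reach; a buffer at or above this threshold can only be transferred
 * by bouncing it through memory below the threshold.)
 */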
struct arm32_dma_range *footbridge_isa_dma_ranges;
int footbridge_isa_dma_nranges;
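/*
 * The two range variables above are presumably filled in by the
 * platform start-up code (footbridge, or e.g. the Shark NC mentioned
 * above) before ISA autoconfiguration runs; isa_dma_init() below
 * copies them into the DMA tag.
 */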
int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);
/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct arm32_bus_dma_tag isa_bus_dma_tag = {
	0,				/* _ranges */
	0,				/* _nranges */
	NULL,				/* _cookie */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,		/* pre */
	_isa_bus_dmamap_sync,		/* post */
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
/*
 * Initialize ISA DMA.
 */
void
isa_dma_init(void)
{

	isa_bus_dma_tag._ranges = footbridge_isa_dma_ranges;
	isa_bus_dma_tag._nranges = footbridge_isa_dma_nranges;
}
/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/
#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif
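/*
 * The counters above exist only when ISA_DMA_STATS is defined: total
 * map loads, loads that had to be bounced, and the number of bounce
 * buffers currently allocated.
 */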
/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct arm32_isa_dma_cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 *
	 * Well, not really... see note above regarding DMA ranges.
	 * Because of the range issue on this platform, we just
	 * always "might bounce".
	 */
	cookieflags = ID_MIGHT_NEED_BOUNCE;
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct arm32_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
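/*
 * The cookie hung off _dm_cookie above carries all of the bounce state
 * for a map: the ID_* flags, the caller's original buffer (id_origbuf,
 * id_origbuflen, id_buftype), and the bounce buffer itself (id_bouncebuf
 * plus a bus_dma_segment_t array that trails the cookie in the same
 * allocation, hence the cookiesize arithmetic above).
 */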
/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}
/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		cookie->id_buftype = ID_BUFTYPE_INVALID;
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		cookie->id_buftype = ID_BUFTYPE_INVALID;
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}
/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}
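/*
 * The uio and raw variants simply panic; apparently no consumer of
 * this tag loads maps from uios or from raw dmamem segments, so the
 * bounce logic was never written for them (the sync routine below
 * likewise panics on ID_BUFTYPE_UIO and ID_BUFTYPE_RAW).
 */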
/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}
/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
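/*
 * Illustrative pairing (a sketch, not code from this file): a driver
 * reading from a device brackets the transfer with
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... start the DMA, wait for the completion interrupt ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *
 * and it is the POSTREAD step that copies the bounce buffer back into
 * the caller's buffer when the map is bouncing; for device writes, the
 * PREWRITE step copies the caller's data out to the bounce buffer.
 */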
/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	if (t->_ranges == NULL)
		return (ENOMEM);

	/* _bus_dmamem_alloc() does the range checks for us. */
	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
	    rsegs, flags));
}
/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (void **)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}
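/*
 * Since the bounce pages come from _isa_bus_dmamem_alloc() above, they
 * are drawn from the platform's ISA DMA ranges and are therefore
 * guaranteed to be addressable by 24-bit ISA DMA.
 */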
void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}