/*	$NetBSD: isadma_machdep.c,v 1.13 2009/03/14 21:04:16 dsl Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isadma_machdep.c,v 1.13 2009/03/14 21:04:16 dsl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>
/*
 * ISA has a 24-bit address limitation, so at most it has a 16M
 * DMA range.  However, some platforms have a more limited range,
 * e.g. the Shark NC.  On these systems, we are provided with
 * a set of DMA ranges.  The pmap module is aware of these ranges
 * and places DMA-safe memory for them onto an alternate free list
 * so that they are protected from being used to service page faults,
 * etc. (unless we've run out of memory elsewhere).
 */
extern struct arm32_dma_range *shark_isa_dma_ranges;
extern int shark_isa_dma_nranges;
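
/*
 * Illustrative sketch (not part of this file's interface): a range
 * table pairs a CPU physical base with the address at which the same
 * memory appears on the ISA bus.  Assuming the usual arm32_dma_range
 * member layout (dr_sysbase, dr_busbase, dr_len), a single hypothetical
 * 16M window starting at physical address 0 could be described as:
 *
 *	struct arm32_dma_range example_ranges[1] = {
 *		{ 0x00000000, 0x00000000, 0x01000000 },
 *	};
 *	int example_nranges = 1;
 */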
int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);
/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct arm32_bus_dma_tag isa_bus_dma_tag = {
	0,				/* _ranges */
	0,				/* _nranges */
	NULL,				/* _cookie */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,		/* pre */
	_isa_bus_dmamap_sync,		/* post */
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
/*
 * Initialize ISA DMA.
 */
void
isa_dma_init(void)
{

	isa_bus_dma_tag._ranges = shark_isa_dma_ranges;
	isa_bus_dma_tag._nranges = shark_isa_dma_nranges;
}
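
/*
 * Illustrative usage sketch (hedged; no driver in this file does exactly
 * this): a driver normally receives this tag indirectly, e.g. as the DMA
 * tag in its attach arguments, and drives it through the standard
 * bus_dma(9) calls.  "buf" and "len" below are hypothetical.
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(&isa_bus_dma_tag, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map) != 0)
 *		return;
 *	if (bus_dmamap_load(&isa_bus_dma_tag, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(&isa_bus_dma_tag, map, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 *		(run the transfer, then sync with BUS_DMASYNC_POSTWRITE)
 *		bus_dmamap_unload(&isa_bus_dma_tag, map);
 *	}
 *	bus_dmamap_destroy(&isa_bus_dma_tag, map);
 */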
/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif
/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct arm32_isa_dma_cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 *
	 * Well, not really... see note above regarding DMA ranges.
	 * Because of the range issue on this platform, we just
	 * always "might bounce".
	 */
	cookieflags = ID_MIGHT_NEED_BOUNCE;
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct arm32_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
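
/*
 * Worked example (illustrative): with 4K pages, a 64K transfer can touch
 * at most (65536 / 4096) + 1 = 17 pages when the buffer is not
 * page-aligned, hence 17 segments.  That is why the cookie allocated
 * above reserves one bus_dma_segment_t per map segment (_dm_segcnt): the
 * bounce buffer must be describable in the worst case the map allows.
 */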
/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}
/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		cookie->id_buftype = ID_BUFTYPE_INVALID;
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		cookie->id_buftype = ID_BUFTYPE_INVALID;
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}
/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}
/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
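
/*
 * Usage note (illustrative): the bounce copies above are driven entirely
 * by the sync operations, so a transfer must be bracketed by the matching
 * PRE/POST pair.  For a device-to-memory (read) transfer on a loaded map
 * "map" of length "len" (both hypothetical):
 *
 *	bus_dmamap_sync(&isa_bus_dma_tag, map, 0, len, BUS_DMASYNC_PREREAD);
 *	(start the DMA and wait for it to complete)
 *	bus_dmamap_sync(&isa_bus_dma_tag, map, 0, len, BUS_DMASYNC_POSTREAD);
 *
 * Omitting the POSTREAD sync would leave the device's data sitting in the
 * bounce pages without ever being copied back to the caller's buffer.
 */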
/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	if (t->_ranges == NULL)
		return (ENOMEM);

	/* _bus_dmamem_alloc() does the range checks for us. */
	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
	    rsegs, flags));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/
int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (void **)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}
void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}