/*	$NetBSD: isa_dma.c,v 1.8 2009/03/14 14:45:56 dsl Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isa_dma.c,v 1.8 2009/03/14 14:45:56 dsl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ATARI_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>
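
/*
 * avail_end is the highest address of usable physical memory.  It is
 * compared against the ISA bounce threshold below to decide whether a
 * map may ever need bounce buffers, and it bounds the range passed to
 * bus_dmamem_alloc_range() when ISA-safe memory is allocated.
 */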
extern paddr_t avail_end;

/*
 * Cookie used by ISA DMA.  A pointer to one of these is stashed in
 * the DMA map.
 */
struct atari_isa_dma_cookie {
        int     id_flags;               /* flags; see below */

        /*
         * Information about the original buffer used during
         * DMA map syncs.  Note that origbuflen is only used
         * for ID_BUFTYPE_LINEAR.
         */
        void            *id_origbuf;    /* pointer to orig buffer if
                                           bouncing */
        bus_size_t      id_origbuflen;  /* ...and size */
        int             id_buftype;     /* type of buffer */

        void            *id_bouncebuf;  /* pointer to the bounce buffer */
        bus_size_t      id_bouncebuflen; /* ...and size */
        int             id_nbouncesegs; /* number of valid bounce segs */
        bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer
                                               physical memory segments */
};

/* id_flags */
#define ID_MIGHT_NEED_BOUNCE    0x01    /* map could need bounce buffers */
#define ID_HAS_BOUNCE           0x02    /* map currently has bounce buffers */
#define ID_IS_BOUNCING          0x04    /* map is bouncing current xfer */

/* id_buftype */
#define ID_BUFTYPE_INVALID      0
#define ID_BUFTYPE_LINEAR       1
#define ID_BUFTYPE_MBUF         2
#define ID_BUFTYPE_UIO          3
#define ID_BUFTYPE_RAW          4
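
/*
 * id_buftype records which bus_dmamap_load*() variant filled the map,
 * so that _isa_bus_dmamap_sync() knows how to copy data between the
 * caller's buffer and the bounce buffer.
 */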

int     _isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
            bus_size_t, bus_size_t, int, bus_dmamap_t *);
void    _isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int     _isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
            bus_size_t, struct proc *, int);
int     _isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
            struct mbuf *, int);
int     _isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
            struct uio *, int);
int     _isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
            bus_dma_segment_t *, int, bus_size_t, int);
void    _isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void    _isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
            bus_addr_t, bus_size_t, int);

int     _isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
            bus_size_t, bus_dma_segment_t *, int, int *, int);

int     _isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
            bus_size_t, int);
void    _isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct atari_bus_dma_tag isa_bus_dma_tag = {
        ISA_DMA_BOUNCE_THRESHOLD,

        _isa_bus_dmamap_create,
        _isa_bus_dmamap_destroy,
        _isa_bus_dmamap_load,
        _isa_bus_dmamap_load_mbuf,
        _isa_bus_dmamap_load_uio,
        _isa_bus_dmamap_load_raw,
        _isa_bus_dmamap_unload,
        _isa_bus_dmamap_sync,
};

/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/
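
/*
 * Optional DMA statistics: number of map loads, number of loads that
 * had to be bounced, and number of bounce buffers currently allocated.
 * Maintained via the STAT_INCR()/STAT_DECR() macros below.
 */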
#ifdef ISA_DMA_STATS
#define STAT_INCR(v)    (v)++
#define STAT_DECR(v)    do { \
                if ((v) == 0) \
                        printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
                else \
                        (v)--; \
                } while (0)
u_long  isa_dma_stats_loads;
u_long  isa_dma_stats_bounces;
u_long  isa_dma_stats_nbouncebufs;
#else
#define STAT_INCR(v)
#define STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
        struct atari_isa_dma_cookie *cookie;
        bus_dmamap_t map;
        int error, cookieflags;
        void *cookiestore;
        size_t cookiesize;

        /* Call common function to create the basic map. */
        error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
            flags, dmamp);
        if (error)
                return (error);

        map = *dmamp;
        map->_dm_cookie = NULL;

        cookiesize = sizeof(struct atari_isa_dma_cookie);

        /*
         * ISA only has 24-bits of address space.  This means
         * we can't DMA to pages over 16M.  In order to DMA to
         * arbitrary buffers, we use "bounce buffers" - pages
         * in memory below the 16M boundary.  On DMA reads,
         * DMA happens to the bounce buffers, and is copied into
         * the caller's buffer.  On writes, data is copied into
         * the bounce buffer, and the DMA happens from those
         * pages.  To software using the DMA mapping interface,
         * this looks simply like a data cache.
         *
         * If we have more than 16M of RAM in the system, we may
         * need bounce buffers.  We check and remember that here.
         *
         * There are exceptions, however.  VLB devices can do
         * 32-bit DMA, and indicate that here.
         *
         * ...or, there is an opposite case.  The most segments
         * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
         * the caller can't handle that many segments (e.g. the
         * ISA DMA controller), we may have to bounce it as well.
         */
        cookieflags = 0;
        if (avail_end <= t->_bounce_thresh ||
            (flags & ISABUS_DMA_32BIT) != 0) {
                /* Bouncing not necessary due to memory size. */
                map->_dm_bounce_thresh = 0;
        }

        if (map->_dm_bounce_thresh != 0 ||
            ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
                cookieflags |= ID_MIGHT_NEED_BOUNCE;
                cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
        }

        /*
         * Allocate our cookie.
         */
        if ((cookiestore = malloc(cookiesize, M_DMAMAP,
            (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
                error = ENOMEM;
                goto out;
        }
        memset(cookiestore, 0, cookiesize);
        cookie = (struct atari_isa_dma_cookie *)cookiestore;
        cookie->id_flags = cookieflags;
        map->_dm_cookie = cookie;

        if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
                /*
                 * Allocate the bounce pages now if the caller
                 * wishes us to do so.
                 */
                if ((flags & BUS_DMA_ALLOCNOW) == 0)
                        goto out;

                error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
        }

 out:
        if (error) {
                if (map->_dm_cookie != NULL)
                        free(map->_dm_cookie, M_DMAMAP);
                _bus_dmamap_destroy(t, map);
        }
        return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
        struct atari_isa_dma_cookie *cookie = map->_dm_cookie;

        /*
         * Free any bounce pages this map might hold.
         */
        if (cookie->id_flags & ID_HAS_BOUNCE)
                _isa_dma_free_bouncebuf(t, map);

        free(cookie, M_DMAMAP);
        _bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
        struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
        int error;

        STAT_INCR(isa_dma_stats_loads);

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        /*
         * Try to load the map the normal way.  If this errors out,
         * and we can bounce, we will.
         */
        error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
        if (error == 0 ||
            (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
                return (error);

        /*
         * First attempt failed; bounce it.
         */

        STAT_INCR(isa_dma_stats_bounces);

        /*
         * Allocate bounce pages, if necessary.
         */
        if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
                error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
                if (error)
                        return (error);
        }

        /*
         * Cache a pointer to the caller's buffer and load the DMA map
         * with the bounce buffer.
         */
        cookie->id_origbuf = buf;
        cookie->id_origbuflen = buflen;
        cookie->id_buftype = ID_BUFTYPE_LINEAR;
        error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
            p, flags);
        if (error) {
                /*
                 * Free the bounce pages, unless our resources
                 * are reserved for our exclusive use.
                 */
                if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
                        _isa_dma_free_bouncebuf(t, map);
                return (error);
        }

        /* ...so _isa_bus_dmamap_sync() knows we're bouncing */
        cookie->id_flags |= ID_IS_BOUNCING;
        return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
        struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
        int error;

        /*
         * Make sure on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
        if ((m0->m_flags & M_PKTHDR) == 0)
                panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

        if (m0->m_pkthdr.len > map->_dm_size)
                return (EINVAL);

        /*
         * Try to load the map the normal way.  If this errors out,
         * and we can bounce, we will.
         */
        error = _bus_dmamap_load_mbuf(t, map, m0, flags);
        if (error == 0 ||
            (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
                return (error);

        /*
         * First attempt failed; bounce it.
         */

        STAT_INCR(isa_dma_stats_bounces);

        /*
         * Allocate bounce pages, if necessary.
         */
        if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
                error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
                    flags);
                if (error)
                        return (error);
        }

        /*
         * Cache a pointer to the caller's buffer and load the DMA map
         * with the bounce buffer.
         */
        cookie->id_origbuf = m0;
        cookie->id_origbuflen = m0->m_pkthdr.len;       /* not really used */
        cookie->id_buftype = ID_BUFTYPE_MBUF;
        error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
            m0->m_pkthdr.len, NULL, flags);
        if (error) {
                /*
                 * Free the bounce pages, unless our resources
                 * are reserved for our exclusive use.
                 */
                if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
                        _isa_dma_free_bouncebuf(t, map);
                return (error);
        }

        /* ...so _isa_bus_dmamap_sync() knows we're bouncing */
        cookie->id_flags |= ID_IS_BOUNCING;
        return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.  Not implemented on this
 * port; it panics if ever called.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

        panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().  Not implemented on this port; it panics if
 * ever called.
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

        panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
        struct atari_isa_dma_cookie *cookie = map->_dm_cookie;

        /*
         * If we have bounce pages, free them, unless they're
         * reserved for our exclusive use.
         */
        if ((cookie->id_flags & ID_HAS_BOUNCE) &&
            (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
                _isa_dma_free_bouncebuf(t, map);

        cookie->id_flags &= ~ID_IS_BOUNCING;
        cookie->id_buftype = ID_BUFTYPE_INVALID;

        /*
         * Do the generic bits of the unload.
         */
        _bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
        struct atari_isa_dma_cookie *cookie = map->_dm_cookie;

        /*
         * Mixing PRE and POST operations is not allowed.
         */
        if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
            (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
                panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
        if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
                if (offset >= map->dm_mapsize)
                        panic("_isa_bus_dmamap_sync: bad offset");
                if (len == 0 || (offset + len) > map->dm_mapsize)
                        panic("_isa_bus_dmamap_sync: bad length");
        }
#endif

        /*
         * If we're not bouncing, just return; nothing to do.
         */
        if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
                return;

        switch (cookie->id_buftype) {
        case ID_BUFTYPE_LINEAR:
                /*
                 * Nothing to do for pre-read.
                 */

                if (ops & BUS_DMASYNC_PREWRITE) {
                        /*
                         * Copy the caller's buffer to the bounce buffer.
                         */
                        memcpy((char *)cookie->id_bouncebuf + offset,
                            (char *)cookie->id_origbuf + offset, len);
                }

                if (ops & BUS_DMASYNC_POSTREAD) {
                        /*
                         * Copy the bounce buffer to the caller's buffer.
                         */
                        memcpy((char *)cookie->id_origbuf + offset,
                            (char *)cookie->id_bouncebuf + offset, len);
                }

                /*
                 * Nothing to do for post-write.
                 */
                break;

        case ID_BUFTYPE_MBUF:
            {
                struct mbuf *m, *m0 = cookie->id_origbuf;
                bus_size_t minlen, moff;

                /*
                 * Nothing to do for pre-read.
                 */

                if (ops & BUS_DMASYNC_PREWRITE) {
                        /*
                         * Copy the caller's buffer to the bounce buffer.
                         */
                        m_copydata(m0, offset, len,
                            (char *)cookie->id_bouncebuf + offset);
                }

                if (ops & BUS_DMASYNC_POSTREAD) {
                        /*
                         * Copy the bounce buffer to the caller's buffer.
                         */
                        for (moff = offset, m = m0; m != NULL && len != 0;
                             m = m->m_next) {
                                /* Find the beginning mbuf. */
                                if (moff >= m->m_len) {
                                        moff -= m->m_len;
                                        continue;
                                }

                                /*
                                 * Now at the first mbuf to sync; nail
                                 * each one until we have exhausted the
                                 * length.
                                 */
                                minlen = len < m->m_len - moff ?
                                    len : m->m_len - moff;

                                memcpy(mtod(m, char *) + moff,
                                    (char *)cookie->id_bouncebuf + offset,
                                    minlen);

                                moff = 0;
                                len -= minlen;
                                offset += minlen;
                        }
                }

                /*
                 * Nothing to do for post-write.
                 */
                break;
            }

        case ID_BUFTYPE_UIO:
                panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
                break;

        case ID_BUFTYPE_RAW:
                panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
                break;

        case ID_BUFTYPE_INVALID:
                panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
                break;

        default:
                printf("unknown buffer type %d\n", cookie->id_buftype);
                panic("_isa_bus_dmamap_sync");
        }
}

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
        paddr_t high;

        if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
                high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
        else
                high = trunc_page(avail_end);

        return (bus_dmamem_alloc_range(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags, 0, high));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
        struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
        int error = 0;

        cookie->id_bouncebuflen = round_page(size);
        error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
            PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
            map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
        if (error)
                goto out;
        error = bus_dmamem_map(t, cookie->id_bouncesegs,
            cookie->id_nbouncesegs, cookie->id_bouncebuflen,
            (void **)&cookie->id_bouncebuf, flags);

 out:
        if (error) {
                bus_dmamem_free(t, cookie->id_bouncesegs,
                    cookie->id_nbouncesegs);
                cookie->id_bouncebuflen = 0;
                cookie->id_nbouncesegs = 0;
        } else {
                cookie->id_flags |= ID_HAS_BOUNCE;
                STAT_INCR(isa_dma_stats_nbouncebufs);
        }

        return (error);
}

void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
        struct atari_isa_dma_cookie *cookie = map->_dm_cookie;

        STAT_DECR(isa_dma_stats_nbouncebufs);

        bus_dmamem_unmap(t, cookie->id_bouncebuf,
            cookie->id_bouncebuflen);
        bus_dmamem_free(t, cookie->id_bouncesegs,
            cookie->id_nbouncesegs);
        cookie->id_bouncebuflen = 0;
        cookie->id_nbouncesegs = 0;
        cookie->id_flags &= ~ID_HAS_BOUNCE;
}