1 /* $NetBSD: int_bus_dma.c,v 1.15 2007/03/04 05:59:45 christos Exp $ */
4 * Copyright (c) 2002 Wasabi Systems, Inc.
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
39 * PCI DMA support for the ARM Integrator.
42 #define _ARM32_BUS_DMA_PRIVATE
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: int_bus_dma.c,v 1.15 2007/03/04 05:59:45 christos Exp $");
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/device.h>
50 #include <sys/malloc.h>
53 #include <uvm/uvm_extern.h>
55 #include <machine/bootconfig.h>
57 #include <evbarm/integrator/int_bus_dma.h>
/*
 * Per-map cookie attached to each bus_dmamap by this module.
 * Tracks whether the map may need / currently has bounce buffers,
 * and caches the caller's original buffer so dmamap_sync() can copy
 * data between it and the bounce pages.
 * NOTE(review): several original lines are missing from this extract
 * (including the struct's closing brace) — restore from upstream.
 */
59 struct integrator_dma_cookie
{
60 int id_flags
; /* flags; see below */
/*
63 * Information about the original buffer used during
64 * DMA map syncs. Note that origbuflen is only used
65 * for ID_BUFTYPE_LINEAR.
 */
67 void *id_origbuf
; /* pointer to orig buffer if bouncing */
69 bus_size_t id_origbuflen
; /* ...and size */
70 int id_buftype
; /* type of buffer */
72 void *id_bouncebuf
; /* pointer to the bounce buffer */
73 bus_size_t id_bouncebuflen
; /* ...and size */
74 int id_nbouncesegs
; /* number of valid bounce segs */
75 bus_dma_segment_t id_bouncesegs
[0]; /* array of bounce buffer
76 physical memory segments (allocated past the end of the struct) */
/* id_flags values: bounce-buffer state machine for a map. */
79 #define ID_MIGHT_NEED_BOUNCE 0x01 /* map could need bounce buffers */
80 #define ID_HAS_BOUNCE 0x02 /* map currently has bounce buffers */
81 #define ID_IS_BOUNCING 0x04 /* map is bouncing current xfer */
/* id_buftype values: what kind of buffer the map was loaded with. */
84 #define ID_BUFTYPE_INVALID 0
85 #define ID_BUFTYPE_LINEAR 1
86 #define ID_BUFTYPE_MBUF 2
87 #define ID_BUFTYPE_UIO 3
88 #define ID_BUFTYPE_RAW 4
/* One DMA range slot per DRAM block; filled by integrator_pci_dma_init(). */
92 static struct arm32_dma_range integrator_dma_ranges
[DRAM_BLOCKS
];
/* Boot-time memory description; enumerated in integrator_pci_dma_init(). */
94 extern BootConfig bootconfig
;
/*
 * Local bus_dma method implementations (installed into the tag by
 * integrator_pci_dma_init()) plus internal bounce-buffer helpers.
 * NOTE(review): some prototype continuation lines (the mbuf/uio
 * argument lists and the alloc_bouncebuf size/flags arguments)
 * are missing from this extract.
 */
96 static int integrator_bus_dmamap_create(bus_dma_tag_t
, bus_size_t
, int,
97 bus_size_t
, bus_size_t
, int, bus_dmamap_t
*);
98 static void integrator_bus_dmamap_destroy(bus_dma_tag_t
, bus_dmamap_t
);
99 static int integrator_bus_dmamap_load(bus_dma_tag_t
, bus_dmamap_t
, void *,
100 bus_size_t
, struct proc
*, int);
101 static int integrator_bus_dmamap_load_mbuf(bus_dma_tag_t
, bus_dmamap_t
,
103 static int integrator_bus_dmamap_load_uio(bus_dma_tag_t
, bus_dmamap_t
,
105 static int integrator_bus_dmamap_load_raw(bus_dma_tag_t
, bus_dmamap_t
,
106 bus_dma_segment_t
*, int, bus_size_t
, int);
107 static void integrator_bus_dmamap_unload(bus_dma_tag_t
, bus_dmamap_t
);
108 static void integrator_bus_dmamap_sync(bus_dma_tag_t
, bus_dmamap_t
,
109 bus_addr_t
, bus_size_t
, int);
110 static int integrator_bus_dmamem_alloc(bus_dma_tag_t
, bus_size_t
,
111 bus_size_t
, bus_size_t
, bus_dma_segment_t
*, int, int *, int);
112 static int integrator_dma_alloc_bouncebuf(bus_dma_tag_t
, bus_dmamap_t
,
114 static void integrator_dma_free_bouncebuf(bus_dma_tag_t
, bus_dmamap_t
);
/*
118 * Create an Integrator DMA map.
 *
 * Builds the generic arm32 map via _bus_dmamap_create(), then attaches
 * an integrator_dma_cookie (with a trailing bounce-segment array when
 * bouncing may be required).  If BUS_DMA_ALLOCNOW is set the bounce
 * pages are allocated immediately.  On failure the cookie is freed and
 * the base map destroyed.
 * NOTE(review): several lines are missing from this extract — the
 * declarations of cookiestore/cookiesize, the error checks after
 * _bus_dmamap_create()/malloc()/alloc_bouncebuf(), and the return
 * statements.  Restore from upstream before building.
 */
121 integrator_bus_dmamap_create(bus_dma_tag_t t
, bus_size_t size
, int nsegments
,
122 bus_size_t maxsegsz
, bus_size_t boundary
, int flags
, bus_dmamap_t
*dmamp
)
124 struct integrator_dma_cookie
*cookie
;
126 int error
, cookieflags
;
130 DEBUG(printf("I_bus_dmamap_create(tag %x, size %x, nseg %d, max %x,"
131 " boundary %x, flags %x, dmamap %p)\n", (unsigned) t
,
132 (unsigned) size
, nsegments
, (unsigned) maxsegsz
,
133 (unsigned)boundary
, flags
, dmamp
));
135 /* Call common function to create the basic map. */
136 error
= _bus_dmamap_create(t
, size
, nsegments
, maxsegsz
, boundary
,
/* Cookie attached below; NULL until fully initialized. */
142 map
->_dm_cookie
= NULL
;
144 cookiesize
= sizeof(struct integrator_dma_cookie
);
/*
147 * Some CM boards have private memory which is significantly
148 * faster than the normal memory stick. To support this
149 * memory we have to bounce any DMA transfers.
 *
151 * In order to DMA to arbitrary buffers, we use "bounce
152 * buffers" - pages in the main PCI visible memory. On DMA
153 * reads, DMA happens to the bounce buffers, and is copied
154 * into the caller's buffer. On writes, data is copied into
155 * the bounce buffer, and the DMA happens from those pages.
156 * To software using the DMA mapping interface, this looks
157 * simply like a data cache.
 *
159 * If we have private RAM in the system, we may need bounce
160 * buffers. We check and remember that here.
 */
163 cookieflags
= ID_MIGHT_NEED_BOUNCE
;
/* Reserve room for the bounce-segment array after the cookie. */
167 cookiesize
+= (sizeof(bus_dma_segment_t
) * map
->_dm_segcnt
);
/*
170 * Allocate our cookie.
 */
172 if ((cookiestore
= malloc(cookiesize
, M_DMAMAP
,
173 (flags
& BUS_DMA_NOWAIT
) ? M_NOWAIT
: M_WAITOK
)) == NULL
) {
177 memset(cookiestore
, 0, cookiesize
);
178 cookie
= (struct integrator_dma_cookie
*)cookiestore
;
179 cookie
->id_flags
= cookieflags
;
180 map
->_dm_cookie
= cookie
;
182 if (cookieflags
& ID_MIGHT_NEED_BOUNCE
) {
/*
184 * Allocate the bounce pages now if the caller
185 * wishes us to do so.
 */
187 if ((flags
& BUS_DMA_ALLOCNOW
) == 0)
190 DEBUG(printf("I_bus_dmamap_create bouncebuf alloc\n"));
191 error
= integrator_dma_alloc_bouncebuf(t
, map
, size
, flags
);
/* Common error path: undo cookie and base map. */
196 if (map
->_dm_cookie
!= NULL
)
197 free(map
->_dm_cookie
, M_DMAMAP
);
198 _bus_dmamap_destroy(t
, map
);
199 printf("I_bus_dmamap_create failed (%d)\n", error
);
/*
205 * Destroy an Integrator DMA map.
 *
 * Releases any bounce pages the map still holds, frees the cookie,
 * and destroys the underlying generic map.
 * NOTE(review): the continuation of the DEBUG printf (the map
 * argument) and the function's braces are missing from this extract.
 */
208 integrator_bus_dmamap_destroy(bus_dma_tag_t t
, bus_dmamap_t map
)
210 struct integrator_dma_cookie
*cookie
= map
->_dm_cookie
;
212 DEBUG(printf("I_bus_dmamap_destroy (tag %x, map %x)\n", (unsigned) t
,
/*
215 * Free any bounce pages this map might hold.
 */
217 if (cookie
->id_flags
& ID_HAS_BOUNCE
) {
218 DEBUG(printf("I_bus_dmamap_destroy bouncebuf\n"));
219 integrator_dma_free_bouncebuf(t
, map
);
/* Cookie was allocated in integrator_bus_dmamap_create(). */
222 free(cookie
, M_DMAMAP
);
223 _bus_dmamap_destroy(t
, map
);
/*
227 * Load an Integrator DMA map with a linear buffer.
 *
 * First tries the normal (non-bouncing) load; if that fails and the
 * map was created with ID_MIGHT_NEED_BOUNCE, allocates bounce pages
 * (if not already held), records the caller's buffer in the cookie,
 * and loads the map with the bounce buffer instead.
 * NOTE(review): error-check/return lines and the map reset are
 * missing from this extract — restore from upstream.
 */
230 integrator_bus_dmamap_load(bus_dma_tag_t t
, bus_dmamap_t map
, void *buf
,
231 bus_size_t buflen
, struct proc
*p
, int flags
)
233 struct integrator_dma_cookie
*cookie
= map
->_dm_cookie
;
236 DEBUG(printf("I_bus_dmamap_load (tag %x, map %x, buf %p, len %u,"
237 " proc %p, flags %d)\n", (unsigned) t
, (unsigned) map
, buf
,
238 (unsigned) buflen
, p
, flags
));
/*
240 * Make sure that on error condition we return "no valid mappings."
 */
/*
246 * Try to load the map the normal way. If this errors out,
247 * and we can bounce, we will.
 */
249 error
= _bus_dmamap_load(t
, map
, buf
, buflen
, p
, flags
);
/* Give up if it worked, or if bouncing is not permitted for this map. */
251 (error
!= 0 && (cookie
->id_flags
& ID_MIGHT_NEED_BOUNCE
) == 0))
/*
255 * First attempt failed; bounce it.
 */
/*
259 * Allocate bounce pages, if necessary.
 */
261 if ((cookie
->id_flags
& ID_HAS_BOUNCE
) == 0) {
262 DEBUG(printf("I_bus_dmamap_load alloc bouncebuf\n"));
263 error
= integrator_dma_alloc_bouncebuf(t
, map
, buflen
, flags
);
/*
269 * Cache a pointer to the caller's buffer and load the DMA map
270 * with the bounce buffer.
 */
272 cookie
->id_origbuf
= buf
;
273 cookie
->id_origbuflen
= buflen
;
274 cookie
->id_buftype
= ID_BUFTYPE_LINEAR
;
275 error
= _bus_dmamap_load(t
, map
, cookie
->id_bouncebuf
, buflen
,
/*
279 * Free the bounce pages, unless our resources
280 * are reserved for our exclusive use.
 */
282 if ((map
->_dm_flags
& BUS_DMA_ALLOCNOW
) == 0)
283 integrator_dma_free_bouncebuf(t
, map
);
287 /* ...so integrator_bus_dmamap_sync() knows we're bouncing */
288 cookie
->id_flags
|= ID_IS_BOUNCING
;
/*
293 * Like integrator_bus_dmamap_load(), but for mbufs.
 *
 * Requires an M_PKTHDR mbuf chain.  Tries the normal mbuf load first;
 * on failure (and if bouncing is allowed) copies through the bounce
 * buffer, loading m_pkthdr.len bytes from it as a linear buffer.
 * NOTE(review): error-check/return lines are missing from this
 * extract — restore from upstream.
 */
296 integrator_bus_dmamap_load_mbuf(bus_dma_tag_t t
, bus_dmamap_t map
,
297 struct mbuf
*m0
, int flags
)
299 struct integrator_dma_cookie
*cookie
= map
->_dm_cookie
;
/*
303 * Make sure that on error condition we return "no valid mappings."
 */
/* The chain must carry a packet header so its total length is known. */
309 if ((m0
->m_flags
& M_PKTHDR
) == 0)
310 panic("integrator_bus_dmamap_load_mbuf: no packet header");
/* Reject packets larger than the map can ever hold. */
313 if (m0
->m_pkthdr
.len
> map
->_dm_size
)
/*
317 * Try to load the map the normal way. If this errors out,
318 * and we can bounce, we will.
 */
320 error
= _bus_dmamap_load_mbuf(t
, map
, m0
, flags
);
/* Give up if it worked, or if bouncing is not permitted for this map. */
322 (error
!= 0 && (cookie
->id_flags
& ID_MIGHT_NEED_BOUNCE
) == 0))
/*
326 * First attempt failed; bounce it.
 *
328 * Allocate bounce pages, if necessary.
 */
330 if ((cookie
->id_flags
& ID_HAS_BOUNCE
) == 0) {
331 error
= integrator_dma_alloc_bouncebuf(t
, map
,
332 m0
->m_pkthdr
.len
, flags
);
/*
338 * Cache a pointer to the caller's buffer and load the DMA map
339 * with the bounce buffer.
 */
341 cookie
->id_origbuf
= m0
;
342 cookie
->id_origbuflen
= m0
->m_pkthdr
.len
; /* not really used */
343 cookie
->id_buftype
= ID_BUFTYPE_MBUF
;
344 error
= _bus_dmamap_load(t
, map
, cookie
->id_bouncebuf
,
345 m0
->m_pkthdr
.len
, NULL
, flags
);
/*
348 * Free the bounce pages, unless our resources
349 * are reserved for our exclusive use.
 */
351 if ((map
->_dm_flags
& BUS_DMA_ALLOCNOW
) == 0)
352 integrator_dma_free_bouncebuf(t
, map
);
356 /* ...so integrator_bus_dmamap_sync() knows we're bouncing */
357 cookie
->id_flags
|= ID_IS_BOUNCING
;
362 * Like integrator_bus_dmamap_load(), but for uios.
365 integrator_bus_dmamap_load_uio(bus_dma_tag_t t
, bus_dmamap_t map
,
366 struct uio
*uio
, int flags
)
369 panic("integrator_bus_dmamap_load_uio: not implemented");
373 * Like intgrator_bus_dmamap_load(), but for raw memory allocated with
374 * bus_dmamem_alloc().
377 integrator_bus_dmamap_load_raw(bus_dma_tag_t t
, bus_dmamap_t map
,
378 bus_dma_segment_t
*segs
, int nsegs
, bus_size_t size
, int flags
)
381 panic("integrator_bus_dmamap_load_raw: not implemented");
385 * Unload an Integrator DMA map.
388 integrator_bus_dmamap_unload(bus_dma_tag_t t
, bus_dmamap_t map
)
390 struct integrator_dma_cookie
*cookie
= map
->_dm_cookie
;
393 * If we have bounce pages, free them, unless they're
394 * reserved for our exclusive use.
396 if ((cookie
->id_flags
& ID_HAS_BOUNCE
) &&
397 (map
->_dm_flags
& BUS_DMA_ALLOCNOW
) == 0)
398 integrator_dma_free_bouncebuf(t
, map
);
400 cookie
->id_flags
&= ~ID_IS_BOUNCING
;
401 cookie
->id_buftype
= ID_BUFTYPE_INVALID
;
404 * Do the generic bits of the unload.
406 _bus_dmamap_unload(t
, map
);
/*
410 * Synchronize an Integrator DMA map.
 *
 * For non-bouncing maps, defers to the generic _bus_dmamap_sync().
 * For bouncing maps, copies data between the caller's original buffer
 * (cached in the cookie) and the bounce buffer, and performs the
 * required cache maintenance on the bounce pages.
 * NOTE(review): many lines are missing from this extract (the
 * offset+len arguments to the cache-range calls, the mbuf copy-loop
 * increments, the UIO/RAW case labels, and most braces/returns) —
 * restore from upstream before building.
 */
413 integrator_bus_dmamap_sync(bus_dma_tag_t t
, bus_dmamap_t map
,
414 bus_addr_t offset
, bus_size_t len
, int ops
)
416 struct integrator_dma_cookie
*cookie
= map
->_dm_cookie
;
418 DEBUG(printf("I_bus_dmamap_sync (tag %x, map %x, offset %x, size %u,"
419 " ops %d\n", (unsigned)t
, (unsigned)map
, (unsigned)offset
,
420 (unsigned)len
, ops
));
/*
422 * Mixing PRE and POST operations is not allowed.
 */
424 if ((ops
& (BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
)) != 0 &&
425 (ops
& (BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
)) != 0)
426 panic("integrator_bus_dmamap_sync: mix PRE and POST");
/* Sanity-check offset/length against the loaded map size. */
429 if ((ops
& (BUS_DMASYNC_PREWRITE
|BUS_DMASYNC_POSTREAD
)) != 0) {
430 if (offset
>= map
->dm_mapsize
)
431 panic("integrator_bus_dmamap_sync: bad offset");
432 if (len
== 0 || (offset
+ len
) > map
->dm_mapsize
)
433 panic("integrator_bus_dmamap_sync: bad length");
/*
438 * If we're not bouncing then use the standard code.
 */
440 if ((cookie
->id_flags
& ID_IS_BOUNCING
) == 0) {
441 _bus_dmamap_sync(t
, map
, offset
, len
, ops
);
445 DEBUG(printf("dmamap_sync(");
446 if (ops
& BUS_DMASYNC_PREREAD
)
448 if (ops
& BUS_DMASYNC_PREWRITE
)
450 if (ops
& BUS_DMASYNC_POSTREAD
)
452 if (ops
& BUS_DMASYNC_POSTWRITE
)
453 printf("postwrite ");)
/* Dispatch on how the bouncing map was loaded. */
455 switch (cookie
->id_buftype
) {
456 case ID_BUFTYPE_LINEAR
:
457 if (ops
& BUS_DMASYNC_PREWRITE
) {
/*
459 * Copy the caller's buffer to the bounce buffer.
 */
461 memcpy((uint8_t *)cookie
->id_bouncebuf
+ offset
,
462 (uint8_t *)cookie
->id_origbuf
+ offset
, len
);
463 cpu_dcache_wbinv_range((vaddr_t
)cookie
->id_bouncebuf
+
466 if (ops
& BUS_DMASYNC_PREREAD
) {
467 cpu_dcache_wbinv_range((vaddr_t
)cookie
->id_bouncebuf
+
470 if (ops
& BUS_DMASYNC_POSTREAD
) {
/*
472 * Copy the bounce buffer to the caller's buffer.
 */
474 memcpy((uint8_t *)cookie
->id_origbuf
+ offset
,
475 (uint8_t *)cookie
->id_bouncebuf
+ offset
, len
);
/*
479 * Nothing to do for post-write.
 */
483 case ID_BUFTYPE_MBUF
:
485 struct mbuf
*m
, *m0
= cookie
->id_origbuf
;
486 bus_size_t minlen
, moff
;
488 if (ops
& BUS_DMASYNC_PREWRITE
) {
/*
490 * Copy the caller's buffer to the bounce buffer.
 */
492 m_copydata(m0
, offset
, len
,
493 (uint8_t *)cookie
->id_bouncebuf
+ offset
);
494 cpu_dcache_wb_range((vaddr_t
)cookie
->id_bouncebuf
+
497 if (ops
& BUS_DMASYNC_PREREAD
) {
498 cpu_dcache_wbinv_range ((vaddr_t
)cookie
->id_bouncebuf
+
501 if (ops
& BUS_DMASYNC_POSTREAD
) {
/*
503 * Copy the bounce buffer to the caller's buffer.
 */
505 for (moff
= offset
, m
= m0
; m
!= NULL
&& len
!= 0;
507 /* Find the beginning mbuf. */
508 if (moff
>= m
->m_len
) {
/*
514 * Now at the first mbuf to sync; nail
515 * each one until we have exhausted the
 * length.
 */
518 minlen
= len
< m
->m_len
- moff
?
519 len
: m
->m_len
- moff
;
521 memcpy(mtod(m
, uint8_t *) + moff
,
522 (uint8_t *)cookie
->id_bouncebuf
+ offset
,
/*
531 * Nothing to do for post-write.
 */
/* UIO and RAW loads are never accepted, so these cannot occur. */
537 panic("integrator_bus_dmamap_sync: ID_BUFTYPE_UIO");
541 panic("integrator_bus_dmamap_sync: ID_BUFTYPE_RAW");
544 case ID_BUFTYPE_INVALID
:
545 panic("integrator_bus_dmamap_sync: ID_BUFTYPE_INVALID");
549 printf("unknown buffer type %d\n", cookie
->id_buftype
);
550 panic("integrator_bus_dmamap_sync");
/*
555 * Allocate memory safe for Integrator DMA.
 *
 * Thin wrapper around _bus_dmamem_alloc(), which restricts the
 * allocation to the tag's DMA-capable ranges.  The tag must have
 * ranges installed (see integrator_pci_dma_init()).
 * NOTE(review): the body's error return for a NULL _ranges tag and
 * the trailing arguments of the _bus_dmamem_alloc() call are missing
 * from this extract.
 */
558 integrator_bus_dmamem_alloc(bus_dma_tag_t t
, bus_size_t size
,
559 bus_size_t alignment
, bus_size_t boundary
, bus_dma_segment_t
*segs
,
560 int nsegs
, int *rsegs
, int flags
)
563 if (t
->_ranges
== NULL
)
566 /* _bus_dmamem_alloc() does the range checks for us. */
567 return (_bus_dmamem_alloc(t
, size
, alignment
, boundary
, segs
, nsegs
,
571 /**********************************************************************
572 * Integrator DMA utility functions
573 **********************************************************************/
/*
 * Allocate and map bounce pages for a map.
 *
 * Rounds the requested size up to whole pages, allocates DMA-safe
 * segments into the cookie's trailing segment array, maps them into
 * kernel VA at cookie->id_bouncebuf, and sets ID_HAS_BOUNCE on
 * success.  On mapping failure the segments are freed and the cookie
 * fields reset.
 * NOTE(review): the error/return lines after each allocation step
 * are missing from this extract — restore from upstream.
 */
576 integrator_dma_alloc_bouncebuf(bus_dma_tag_t t
, bus_dmamap_t map
,
577 bus_size_t size
, int flags
)
579 struct integrator_dma_cookie
*cookie
= map
->_dm_cookie
;
582 DEBUG(printf("Alloc bouncebuf\n"));
/* Bounce buffers are managed in whole pages. */
583 cookie
->id_bouncebuflen
= round_page(size
);
584 error
= integrator_bus_dmamem_alloc(t
, cookie
->id_bouncebuflen
,
585 NBPG
, map
->_dm_boundary
, cookie
->id_bouncesegs
,
586 map
->_dm_segcnt
, &cookie
->id_nbouncesegs
, flags
);
592 for (seg
= 0; seg
< cookie
->id_nbouncesegs
; seg
++)
593 DEBUG(printf("Seg %d @ PA 0x%08x+0x%x\n", seg
,
594 (unsigned) cookie
->id_bouncesegs
[seg
].ds_addr
,
595 (unsigned) cookie
->id_bouncesegs
[seg
].ds_len
));
/* Map the physical segments into kernel virtual address space. */
597 error
= _bus_dmamem_map(t
, cookie
->id_bouncesegs
,
598 cookie
->id_nbouncesegs
, cookie
->id_bouncebuflen
,
599 (void **)&cookie
->id_bouncebuf
, flags
);
/* Mapping failed: release the segments and reset the cookie. */
603 _bus_dmamem_free(t
, cookie
->id_bouncesegs
,
604 cookie
->id_nbouncesegs
);
605 cookie
->id_bouncebuflen
= 0;
606 cookie
->id_nbouncesegs
= 0;
608 DEBUG(printf("Alloc bouncebuf OK\n"));
609 cookie
->id_flags
|= ID_HAS_BOUNCE
;
/*
 * Free a map's bounce pages.
 *
 * Undoes integrator_dma_alloc_bouncebuf(): unmaps the kernel VA,
 * frees the physical segments, resets the cookie bookkeeping, and
 * clears ID_HAS_BOUNCE.
 */
616 integrator_dma_free_bouncebuf(bus_dma_tag_t t
, bus_dmamap_t map
)
618 struct integrator_dma_cookie
*cookie
= map
->_dm_cookie
;
620 _bus_dmamem_unmap(t
, cookie
->id_bouncebuf
,
621 cookie
->id_bouncebuflen
);
622 _bus_dmamem_free(t
, cookie
->id_bouncesegs
,
623 cookie
->id_nbouncesegs
);
624 cookie
->id_bouncebuflen
= 0;
625 cookie
->id_nbouncesegs
= 0;
626 cookie
->id_flags
&= ~ID_HAS_BOUNCE
;
/*
 * Initialize the Integrator PCI DMA tag.
 *
 * Builds the DMA-range table from the boot-time DRAM description
 * (one range per DRAM block flagged BOOT_DRAM_CAN_DMA, with the bus
 * address computed via LOCAL_TO_CM_ALIAS), panics if no DMA-capable
 * memory exists, then installs this module's bus_dma methods into
 * the tag.
 * NOTE(review): lines incrementing nranges, the nranges==0 check,
 * and the dmat->_ranges assignment are missing from this extract —
 * restore from upstream.
 */
630 integrator_pci_dma_init(bus_dma_tag_t dmat
)
632 struct arm32_dma_range
*dr
= integrator_dma_ranges
;
636 for (i
= 0; i
< bootconfig
.dramblocks
; i
++)
637 if (bootconfig
.dram
[i
].flags
& BOOT_DRAM_CAN_DMA
) {
638 dr
[nranges
].dr_sysbase
= bootconfig
.dram
[i
].address
;
639 dr
[nranges
].dr_busbase
=
640 LOCAL_TO_CM_ALIAS(dr
[nranges
].dr_sysbase
);
641 dr
[nranges
].dr_len
= bootconfig
.dram
[i
].pages
* NBPG
;
646 panic ("integrator_pci_dma_init: No DMA capable memory");
649 dmat
->_nranges
= nranges
;
/* Install this module's map methods. */
651 dmat
->_dmamap_create
= integrator_bus_dmamap_create
;
652 dmat
->_dmamap_destroy
= integrator_bus_dmamap_destroy
;
653 dmat
->_dmamap_load
= integrator_bus_dmamap_load
;
654 dmat
->_dmamap_load_mbuf
= integrator_bus_dmamap_load_mbuf
;
655 dmat
->_dmamap_load_uio
= integrator_bus_dmamap_load_uio
;
656 dmat
->_dmamap_load_raw
= integrator_bus_dmamap_load_raw
;
657 dmat
->_dmamap_unload
= integrator_bus_dmamap_unload
;
/* The same sync routine handles both PRE and POST operations. */
658 dmat
->_dmamap_sync_pre
= integrator_bus_dmamap_sync
;
659 dmat
->_dmamap_sync_post
= integrator_bus_dmamap_sync
;
/* Memory methods: only alloc needs range-aware behavior. */
661 dmat
->_dmamem_alloc
= integrator_bus_dmamem_alloc
;
662 dmat
->_dmamem_free
= _bus_dmamem_free
;
663 dmat
->_dmamem_map
= _bus_dmamem_map
;
664 dmat
->_dmamem_unmap
= _bus_dmamem_unmap
;
665 dmat
->_dmamem_mmap
= _bus_dmamem_mmap
;