/*	$NetBSD: gapspci_dma.c,v 1.16 2008/06/04 12:41:41 ad Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Bus DMA implementation for the SEGA GAPS PCI bridge.
 *
 * NOTE: We only implement a small subset of what the bus_dma(9)
 * API specifies.  Right now, the GAPS PCI bridge is only used for
 * the Dreamcast Broadband Adapter, so we only provide what the
 * pci(4) and rtk(4) drivers need.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: gapspci_dma.c,v 1.16 2008/06/04 12:41:41 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/extent.h>
#include <sys/malloc.h>

#include <machine/cpu.h>
#include <machine/bus.h>

#include <dev/pci/pcivar.h>

#include <dreamcast/dev/g2/gapspcivar.h>

#include <uvm/uvm_extern.h>

int	gaps_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
void	gaps_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	gaps_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int);
int	gaps_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, int);
int	gaps_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
int	gaps_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
	    int, bus_size_t, int);
void	gaps_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	gaps_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int);

int	gaps_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
	    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
	    int nsegs, int *rsegs, int flags);
void	gaps_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs);
int	gaps_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
	    size_t size, void **kvap, int flags);
void	gaps_dmamem_unmap(bus_dma_tag_t tag, void *kva, size_t size);
paddr_t	gaps_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs, off_t off, int prot, int flags);
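
/*
 * Implementation note (added commentary): the GAPS bridge cannot DMA
 * to host memory directly, so a map "load" below only reserves a
 * window in the bridge's SRAM.  All data movement happens in
 * gaps_dmamap_sync(): PREWRITE copies the caller's buffer out to
 * SRAM, POSTREAD copies it back.  Drivers using this tag must
 * therefore always issue the appropriate sync operations.
 */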

void
gaps_dma_init(struct gaps_softc *sc)
{
	bus_dma_tag_t t = &sc->sc_dmat;

	memset(t, 0, sizeof(*t));

	t->_cookie = sc;

	t->_dmamap_create = gaps_dmamap_create;
	t->_dmamap_destroy = gaps_dmamap_destroy;
	t->_dmamap_load = gaps_dmamap_load;
	t->_dmamap_load_mbuf = gaps_dmamap_load_mbuf;
	t->_dmamap_load_uio = gaps_dmamap_load_uio;
	t->_dmamap_load_raw = gaps_dmamap_load_raw;
	t->_dmamap_unload = gaps_dmamap_unload;
	t->_dmamap_sync = gaps_dmamap_sync;

	t->_dmamem_alloc = gaps_dmamem_alloc;
	t->_dmamem_free = gaps_dmamem_free;
	t->_dmamem_map = gaps_dmamem_map;
	t->_dmamem_unmap = gaps_dmamem_unmap;
	t->_dmamem_mmap = gaps_dmamem_mmap;

	/*
	 * The GAPS PCI bridge has 32k of DMA memory.  We manage it
	 * with an extent map.
	 */
	sc->sc_dma_ex = extent_create("gaps dma",
	    sc->sc_dmabase, sc->sc_dmabase + (sc->sc_dmasize - 1),
	    M_DEVBUF, NULL, 0, EX_WAITOK | EXF_NOCOALESCE);

	if (bus_space_map(sc->sc_memt, sc->sc_dmabase, sc->sc_dmasize,
	    0, &sc->sc_dma_memh) != 0)
		panic("gaps_dma_init: can't map SRAM buffer");
}
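
/*
 * Usage sketch (illustrative, not part of this file): the bridge
 * attachment is expected to call gaps_dma_init() once and then hand
 * the tag to PCI autoconfiguration, roughly:
 *
 *	gaps_dma_init(sc);
 *	pba.pba_dmat = &sc->sc_dmat;
 *
 * where pba is the struct pcibus_attach_args passed to config_found();
 * rtk(4) then reaches the gaps_* hooks through the bus_dma(9) wrappers.
 */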

/*
 * A GAPS DMA map -- has the standard DMA map, plus some extra
 * state used to bounce buffers through the bridge's SRAM.
 */
struct gaps_dmamap {
	struct dreamcast_bus_dmamap gd_dmamap;
	void *gd_origbuf;
	int gd_buftype;
};

#define	GAPS_DMA_BUFTYPE_INVALID	0
#define	GAPS_DMA_BUFTYPE_LINEAR		1
#define	GAPS_DMA_BUFTYPE_MBUF		2

int
gaps_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap;
	bus_dmamap_t map;

	/*
	 * Allocate and initialize the DMA map.  The end of the map is
	 * a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.  Since the DMA map always includes
	 * one segment, and we only support one segment, this is really
	 * easy.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources
	 * and they are not to be freed.
	 */

	gmap = malloc(sizeof(*gmap), M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	if (gmap == NULL)
		return ENOMEM;

	memset(gmap, 0, sizeof(*gmap));

	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;

	map = &gmap->gd_dmamap;

	map->_dm_size = size;
	map->_dm_segcnt = 1;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;

	if (flags & BUS_DMA_ALLOCNOW) {
		u_long res;
		int error;

		error = extent_alloc(sc->sc_dma_ex, size, 1024 /* XXX */,
		    map->_dm_boundary,
		    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
		if (error) {
			free(gmap, M_DMAMAP);
			return error;
		}

		map->dm_segs[0].ds_addr = res;
		map->dm_segs[0].ds_len = size;

		map->dm_mapsize = size;
		map->dm_nsegs = 1;
	} else {
		map->dm_mapsize = 0;	/* no valid mappings */
		map->dm_nsegs = 0;
	}

	*dmamap = map;
	return 0;
}
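
/*
 * Example (sketch only): a typical caller pairs create/load/sync as
 *
 *	bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(t, map, buf, size, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(t, map, 0, size, BUS_DMASYNC_PREWRITE);
 *
 * Since only one segment is ever returned, asking for nsegments > 1
 * buys nothing with this implementation.
 */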

void
gaps_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct gaps_softc *sc = t->_cookie;

	if (map->_dm_flags & BUS_DMA_ALLOCNOW) {
		(void) extent_free(sc->sc_dma_ex,
		    map->dm_segs[0].ds_addr,
		    map->dm_mapsize, EX_NOWAIT);
	}

	free(map, M_DMAMAP);
}

int
gaps_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *addr,
    bus_size_t size, struct proc *p, int flags)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	u_long res;
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		/*
		 * Make sure that on error condition we return
		 * "no valid mappings".
		 */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	}

	/* XXX Don't support DMA to process space right now. */
	if (p != NULL)
		return EINVAL;

	if (size > map->_dm_size)
		return EINVAL;

	error = extent_alloc(sc->sc_dma_ex, size, 1024 /* XXX */,
	    map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
	if (error)
		return error;

	map->dm_segs[0].ds_addr = res;
	map->dm_segs[0].ds_len = size;

	gmap->gd_origbuf = addr;
	gmap->gd_buftype = GAPS_DMA_BUFTYPE_LINEAR;

	map->dm_mapsize = size;
	map->dm_nsegs = 1;

	return 0;
}
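
/*
 * Note (added commentary): unlike a typical bus_dma back-end, the
 * ds_addr established above is an address inside the bridge's SRAM,
 * not the physical address of the caller's buffer, and no data has
 * moved yet; gaps_dmamap_sync() performs the actual copies.
 */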

int
gaps_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	u_long res;
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		/*
		 * Make sure that on error condition we return
		 * "no valid mappings".
		 */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	}

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("gaps_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	error = extent_alloc(sc->sc_dma_ex, m0->m_pkthdr.len, 1024 /* XXX */,
	    map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
	if (error)
		return error;

	map->dm_segs[0].ds_addr = res;
	map->dm_segs[0].ds_len = m0->m_pkthdr.len;

	gmap->gd_origbuf = m0;
	gmap->gd_buftype = GAPS_DMA_BUFTYPE_MBUF;

	map->dm_mapsize = m0->m_pkthdr.len;
	map->dm_nsegs = 1;

	return 0;
}

int
gaps_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	printf("gaps_dmamap_load_uio: not implemented\n");
	return EINVAL;
}

int
gaps_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	printf("gaps_dmamap_load_raw: not implemented\n");
	return EINVAL;
}

void
gaps_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;

	if (gmap->gd_buftype == GAPS_DMA_BUFTYPE_INVALID) {
		printf("gaps_dmamap_unload: DMA map not loaded!\n");
		return;
	}

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		(void) extent_free(sc->sc_dma_ex,
		    map->dm_segs[0].ds_addr,
		    map->dm_mapsize, EX_NOWAIT);
	}
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;
}

void
gaps_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	bus_addr_t dmaoff = map->dm_segs[0].ds_addr - sc->sc_dmabase;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("gaps_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize) {
			printf("offset 0x%lx mapsize 0x%lx\n",
			    offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad offset");
		}
		if (len == 0 || (offset + len) > map->dm_mapsize) {
			printf("len 0x%lx offset 0x%lx mapsize 0x%lx\n",
			    len, offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad length");
		}
	}
#endif

	switch (gmap->gd_buftype) {
	case GAPS_DMA_BUFTYPE_INVALID:
		printf("gaps_dmamap_sync: DMA map is not loaded!\n");
		return;

	case GAPS_DMA_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the SRAM buffer.
			 */
			bus_space_write_region_1(sc->sc_memt,
			    sc->sc_dma_memh, dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer to the caller's buffer.
			 */
			bus_space_read_region_1(sc->sc_memt,
			    sc->sc_dma_memh, dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case GAPS_DMA_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = gmap->gd_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer into the SRAM buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				bus_space_write_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer into the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				bus_space_read_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	default:
		printf("unknown buffer type %d\n", gmap->gd_buftype);
		panic("gaps_dmamap_sync");
	}
}
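
/*
 * Example (sketch only): a receive path in an rtk(4)-style driver
 * would bounce a completed packet back with
 *
 *	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, pktlen,
 *	    BUS_DMASYNC_POSTREAD);
 *
 * which lands in the POSTREAD branches above and copies the SRAM
 * window into the original buffer or mbuf chain.
 */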

int
gaps_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	extern paddr_t avail_start, avail_end;	/* from pmap.c */
	struct pglist mlist;
	struct vm_page *m;
	paddr_t curaddr, lastaddr;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate the pages from the VM system.
	 */
	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}
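
/*
 * Note (added commentary): the loop above coalesces physically
 * contiguous pages into one segment, so an allocation satisfied
 * contiguously by uvm_pglistalloc() comes back as a single segment;
 * *rsegs reports the number of runs actually formed.
 */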

void
gaps_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct pglist mlist;
	struct vm_page *m;
	bus_addr_t addr;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}

int
gaps_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use P2SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (void *)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("gaps_dmamem_map: size botch");
			pmap_kenter_pa(va, addr,
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}
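
/*
 * Note (added commentary): SH3_PHYS_TO_P2SEG() above yields the fixed
 * uncached P2 alias of a physical address, so the single-segment fast
 * path consumes no TLB entries; only multi-segment allocations pay for
 * kernel VA and pmap_kenter_pa() mappings.
 */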

void
gaps_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long) kva & PAGE_MASK)
		panic("gaps_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with P2SEG.
	 */
	if (kva >= (void *)SH3_P2SEG_BASE &&
	    kva <= (void *)SH3_P2SEG_END)
		return;

	size = round_page(size);
	pmap_kremove((vaddr_t) kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t) kva, size, UVM_KMF_VAONLY);
}

paddr_t
gaps_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{

	/* Not implemented. */
	return (paddr_t) -1;
}