/*	$NetBSD: intio.c,v 1.41 2009/01/18 04:48:53 isaki Exp $	*/

/*
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NetBSD/x68k internal I/O virtual bus.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intio.c,v 1.41 2009/01/18 04:48:53 isaki Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/extent.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/frame.h>

#include <arch/x68k/dev/intiovar.h>

/*
 * bus_space(9) interface
 */
static int intio_bus_space_map(bus_space_tag_t, bus_addr_t, bus_size_t,
    int, bus_space_handle_t *);
static void intio_bus_space_unmap(bus_space_tag_t, bus_space_handle_t,
    bus_size_t);
static int intio_bus_space_subregion(bus_space_tag_t, bus_space_handle_t,
    bus_size_t, bus_size_t, bus_space_handle_t *);
static struct x68k_bus_space intio_bus = {
	intio_bus_space_map, intio_bus_space_unmap, intio_bus_space_subregion,
	x68k_bus_space_alloc, x68k_bus_space_free,
	x68k_bus_space_barrier,
};
/*
 * bus_dma(9) interface
 */
#define	INTIO_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)
int	_intio_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_intio_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_intio_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_intio_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_intio_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_intio_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_intio_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_intio_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);
int	_intio_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_intio_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_intio_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);
struct x68k_bus_dma intio_bus_dma = {
	INTIO_DMA_BOUNCE_THRESHOLD,
	_intio_bus_dmamap_create,
	_intio_bus_dmamap_destroy,
	_intio_bus_dmamap_load,
	_intio_bus_dmamap_load_mbuf,
	_intio_bus_dmamap_load_uio,
	_intio_bus_dmamap_load_raw,
	_intio_bus_dmamap_unload,
	_intio_bus_dmamap_sync,
	_intio_bus_dmamem_alloc,
	x68k_bus_dmamem_free,
	x68k_bus_dmamem_unmap,
	x68k_bus_dmamem_mmap,
};
static int intio_match(device_t, cfdata_t, void *);
static void intio_attach(device_t, device_t, void *);
static int intio_search(device_t, cfdata_t, const int *, void *);
static int intio_print(void *, const char *);
static void intio_alloc_system_ports(struct intio_softc *);

CFATTACH_DECL_NEW(intio, sizeof(struct intio_softc),
    intio_match, intio_attach, NULL, NULL);

extern struct cfdriver intio_cd;

static int intio_attached;
static struct intio_interrupt_vector {
	intio_intr_handler_t	iiv_handler;
	void			*iiv_arg;
	struct evcnt		*iiv_evcnt;
} iiv[256] = {{0,},};
static int
intio_match(device_t parent, cfdata_t cf, void *aux)
{

	if (strcmp(aux, intio_cd.cd_name) != 0)
		return (0);
	if (intio_attached)
		return (0);

	return (1);
}
static void
intio_attach(device_t parent, device_t self, void *aux)
{
	struct intio_softc *sc = device_private(self);
	struct intio_attach_args ia;

	intio_attached = 1;

	aprint_normal(" mapped at %8p\n", intiobase);

	sc->sc_map = extent_create("intiomap",
	    INTIOBASE,
	    INTIOBASE + 0x400000,
	    M_DEVBUF, NULL, 0, EX_NOWAIT);
	intio_alloc_system_ports(sc);

	sc->sc_bst = &intio_bus;
	sc->sc_bst->x68k_bus_device = self;
	sc->sc_dmat = &intio_bus_dma;

	memset(iiv, 0, sizeof(struct intio_interrupt_vector) * 256);

	ia.ia_bst = sc->sc_bst;
	ia.ia_dmat = sc->sc_dmat;

	config_search_ia(intio_search, self, "intio", &ia);
}
static int
intio_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
	struct intio_softc *sc = device_private(parent);
	struct intio_attach_args *ia = aux;

	ia->ia_bst = sc->sc_bst;
	ia->ia_dmat = sc->sc_dmat;
	ia->ia_name = cf->cf_name;
	ia->ia_addr = cf->cf_addr;
	ia->ia_intr = cf->cf_intr;
	ia->ia_dma = cf->cf_dma;
	ia->ia_dmaintr = cf->cf_dmaintr;

	if (config_match(parent, cf, ia) > 0)
		config_attach(parent, cf, ia, intio_print);

	return (0);
}
static int
intio_print(void *aux, const char *name)
{
	struct intio_attach_args *ia = aux;

/*	if (ia->ia_addr > 0)	*/
		aprint_normal(" addr 0x%06x", ia->ia_addr);
	if (ia->ia_intr > 0)
		aprint_normal(" intr 0x%02x", ia->ia_intr);
	if (ia->ia_dma >= 0) {
		aprint_normal(" using DMA ch%d", ia->ia_dma);
		if (ia->ia_dmaintr > 0)
			aprint_normal(" intr 0x%02x and 0x%02x",
			    ia->ia_dmaintr, ia->ia_dmaintr + 1);
	}

	return (QUIET);
}
/*
 * intio memory map manager
 */

int
intio_map_allocate_region(device_t parent, struct intio_attach_args *ia,
    enum intio_map_flag flag)
{
	struct intio_softc *sc = device_private(parent);
	struct extent *map = sc->sc_map;
	int r;

	r = extent_alloc_region(map, ia->ia_addr, ia->ia_size, 0);
	if (r == 0) {
		if (flag != INTIO_MAP_ALLOCATE)
			extent_free(map, ia->ia_addr, ia->ia_size, 0);
		return (0);
	}

	return (-1);
}
void
intio_map_free_region(device_t parent, struct intio_attach_args *ia)
{
	struct intio_softc *sc = device_private(parent);
	struct extent *map = sc->sc_map;

	extent_free(map, ia->ia_addr, ia->ia_size, 0);
}
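
/*
 * Usage sketch (illustrative, not part of this file): a child driver
 * reserves its register range in the intio extent map before using it and
 * releases it again when done.  The size value is made up, and the example
 * assumes the caller sits in a match/attach routine with "parent" and "aux"
 * in scope.
 *
 *	struct intio_attach_args *ia = aux;
 *
 *	ia->ia_size = 0x20;
 *	if (intio_map_allocate_region(parent, ia, INTIO_MAP_ALLOCATE) != 0)
 *		return;
 *	...
 *	intio_map_free_region(parent, ia);
 */
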
static void
intio_alloc_system_ports(struct intio_softc *sc)
{

	extent_alloc_region(sc->sc_map, INTIO_SYSPORT, 16, 0);
	extent_alloc_region(sc->sc_map, INTIO_SICILIAN, 0x2000, 0);
}
/*
 * intio bus space stuff.
 */
static int
intio_bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	/*
	 * Intio bus is mapped permanently.
	 */
	*bshp = (bus_space_handle_t)IIOV(bpa);

	/*
	 * Some devices are mapped on odd or even addresses only.
	 */
	if ((flags & BUS_SPACE_MAP_SHIFTED_MASK) == BUS_SPACE_MAP_SHIFTED_ODD)
		*bshp += 0x80000001;
	if ((flags & BUS_SPACE_MAP_SHIFTED_MASK) == BUS_SPACE_MAP_SHIFTED_EVEN)
		*bshp += 0x80000000;

	return (0);
}
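
/*
 * Usage sketch (illustrative, not part of this file): a child driver maps
 * its registers through this tag with bus_space_map(9).  A device wired to
 * odd addresses only would pass BUS_SPACE_MAP_SHIFTED_ODD; the address and
 * size below are made up for the example.
 *
 *	bus_space_handle_t ioh;
 *
 *	if (bus_space_map(ia->ia_bst, ia->ia_addr, 0x2000,
 *	    BUS_SPACE_MAP_SHIFTED_ODD, &ioh) != 0)
 *		return;
 *	(void)bus_space_read_1(ia->ia_bst, ioh, 0);
 */
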
static void
intio_bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh,
    bus_size_t size)
{
	/* Intio bus is mapped permanently; nothing to unmap. */
}

static int
intio_bus_space_subregion(bus_space_tag_t t, bus_space_handle_t bsh,
    bus_size_t offset, bus_size_t size, bus_space_handle_t *nbshp)
{

	*nbshp = bsh + offset;
	return (0);
}
int
intio_intr_establish(int vector, const char *name, intio_intr_handler_t handler,
    void *arg)
{

	return intio_intr_establish_ext(vector, name, "intr", handler, arg);
}
int
intio_intr_establish_ext(int vector, const char *name1, const char *name2,
    intio_intr_handler_t handler, void *arg)
{
	struct evcnt *evcnt;

	if (vector < 16)
		panic("Invalid interrupt vector");
	if (iiv[vector].iiv_handler)
		return (EBUSY);

	evcnt = malloc(sizeof(*evcnt), M_DEVBUF, M_NOWAIT);
	if (evcnt == NULL)
		return (ENOMEM);
	evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, NULL, name1, name2);

	iiv[vector].iiv_handler = handler;
	iiv[vector].iiv_arg = arg;
	iiv[vector].iiv_evcnt = evcnt;

	return (0);
}
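
/*
 * Usage sketch (illustrative, not part of this file): a child driver hooks
 * its handler onto the vector it was configured with and detaches it
 * symmetrically.  foo_intr and sc are hypothetical driver names.
 *
 *	if (intio_intr_establish(ia->ia_intr, device_xname(self),
 *	    foo_intr, sc) != 0) {
 *		aprint_error_dev(self, "can't establish interrupt\n");
 *		return;
 *	}
 *	...
 *	intio_intr_disestablish(ia->ia_intr, sc);
 */
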
int
intio_intr_disestablish(int vector, void *arg)
{

	if (iiv[vector].iiv_handler == 0 || iiv[vector].iiv_arg != arg)
		return (EINVAL);
	iiv[vector].iiv_handler = 0;
	iiv[vector].iiv_arg = 0;
	evcnt_detach(iiv[vector].iiv_evcnt);
	free(iiv[vector].iiv_evcnt, M_DEVBUF);

	return (0);
}
int
intio_intr(struct frame *frame)
{
	int vector = frame->f_vector / 4;

	if (iiv[vector].iiv_handler == 0) {
		printf("Stray interrupt: %d type %x, pc %x\n",
		    vector, frame->f_format, frame->f_pc);
		return (0);
	}

	iiv[vector].iiv_evcnt->ev_count++;

	return (*(iiv[vector].iiv_handler))(iiv[vector].iiv_arg);
}
/*
 * Intio I/O controller interrupt
 */
static u_int8_t intio_ivec = 0;

void
intio_set_ivec(int vec)
{

	if (intio_ivec && intio_ivec != (vec & 0xfc))
		panic("Wrong interrupt vector for Sicilian.");

	intio_set_sicilian_ivec(vec);
}
/*
 * intio bus DMA stuff.  stolen from arch/i386/isa/isa_machdep.c
 */

/*
 * Create an INTIO DMA map.
 */
int
_intio_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct intio_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	size_t cookiesize;
	extern paddr_t avail_end;

	/* Call common function to create the basic map. */
	error = x68k_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->x68k_dm_cookie = NULL;

	cookiesize = sizeof(struct intio_dma_cookie);

	/*
	 * INTIO only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * DMAC), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if (avail_end <= t->_bounce_thresh)
		/* Bouncing not necessary due to memory size. */
		map->x68k_dm_bounce_thresh = 0;
	if (map->x68k_dm_bounce_thresh != 0 ||
	    ((map->x68k_dm_size / PAGE_SIZE) + 1) > map->x68k_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->x68k_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	cookie = malloc(cookiesize, M_DMAMAP,
	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK) | M_ZERO);
	if (cookie == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie->id_flags = cookieflags;
	map->x68k_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _intio_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->x68k_dm_cookie != NULL)
			free(map->x68k_dm_cookie, M_DMAMAP);
		x68k_bus_dmamap_destroy(t, map);
	}
	return (error);
}
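
/*
 * Usage sketch (illustrative, not part of this file): the bounce machinery
 * above stays invisible to consumers, which go through the usual bus_dma(9)
 * sequence on the tag passed in the attach arguments.  buf/len stand for a
 * caller-supplied buffer, and the sizes and flags are made up.
 *
 *	bus_dmamap_t dmam;
 *
 *	if (bus_dmamap_create(ia->ia_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &dmam) != 0)
 *		return;
 *	if (bus_dmamap_load(ia->ia_dmat, dmam, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(ia->ia_dmat, dmam, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 *		...start the transfer...
 *		bus_dmamap_sync(ia->ia_dmat, dmam, 0, len,
 *		    BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(ia->ia_dmat, dmam);
 *	}
 *	bus_dmamap_destroy(ia->ia_dmat, dmam);
 */
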
/*
 * Destroy an INTIO DMA map.
 */
void
_intio_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_intio_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	x68k_bus_dmamap_destroy(t, map);
}
/*
 * Load an INTIO DMA map with a linear buffer.
 */
int
_intio_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = x68k_bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _intio_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = x68k_bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->x68k_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_intio_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _intio_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
/*
 * Like _intio_bus_dmamap_load(), but for mbufs.
 */
int
_intio_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_intio_bus_dmamap_load_mbuf: no packet header");

	if (m0->m_pkthdr.len > map->x68k_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = x68k_bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _intio_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = x68k_bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->x68k_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_intio_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _intio_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
/*
 * Like _intio_bus_dmamap_load(), but for uios.
 */
int
_intio_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_intio_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _intio_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_intio_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_intio_bus_dmamap_load_raw: not implemented");
}
/*
 * Unload an INTIO DMA map.
 */
void
_intio_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->x68k_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_intio_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	x68k_bus_dmamap_unload(t, map);
}
/*
 * Synchronize an INTIO DMA map.
 */
void
_intio_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_intio_bus_dmamap_sync: mix PRE and POST");

	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_intio_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_intio_bus_dmamap_sync: bad length");
	}

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			    m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_intio_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_intio_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_intio_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_intio_bus_dmamap_sync");
	}
}
/*
 * Allocate memory safe for INTIO DMA.
 */
int
_intio_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	paddr_t high;
	extern paddr_t avail_end;

	if (avail_end > INTIO_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(INTIO_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (x68k_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}
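
/*
 * Usage sketch (illustrative, not part of this file): a driver that needs a
 * buffer the DMA hardware can always reach allocates it with
 * bus_dmamem_alloc(9) on this tag, which ends up in the range-limited
 * allocation above.  The sizes and flags below are made up for the example.
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	if (bus_dmamem_alloc(ia->ia_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
 *		return;
 *	if (bus_dmamem_map(ia->ia_dmat, &seg, rseg, PAGE_SIZE, &kva,
 *	    BUS_DMA_NOWAIT) != 0) {
 *		bus_dmamem_free(ia->ia_dmat, &seg, rseg);
 *		return;
 *	}
 */
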
/**********************************************************************
 * INTIO DMA utility functions
 **********************************************************************/
int
_intio_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _intio_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->x68k_dm_boundary, cookie->id_bouncesegs,
	    map->x68k_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error == 0)
		error = x68k_bus_dmamem_map(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
		    (void **)&cookie->id_bouncebuf, flags);

	if (error) {
		x68k_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else
		cookie->id_flags |= ID_HAS_BOUNCE;

	return (error);
}
void
_intio_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	x68k_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	x68k_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);

	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}