sys/arch/vax/uba/uba_dma.c (netbsd-mini2440.git)
/*	$NetBSD: uba_dma.c,v 1.9 2008/03/11 05:34:02 matt Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uba_dma.c,v 1.9 2008/03/11 05:34:02 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <uvm/uvm_extern.h>

#define _VAX_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/sgmap.h>

#include <dev/qbus/ubavar.h>

#include <arch/vax/uba/uba_common.h>

int	uba_bus_dmamap_create_sgmap(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);

void	uba_bus_dmamap_destroy_sgmap(bus_dma_tag_t, bus_dmamap_t);

int	uba_bus_dmamap_load_sgmap(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);

int	uba_bus_dmamap_load_mbuf_sgmap(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);

int	uba_bus_dmamap_load_uio_sgmap(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);

int	uba_bus_dmamap_load_raw_sgmap(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);

void	uba_bus_dmamap_unload_sgmap(bus_dma_tag_t, bus_dmamap_t);

void	uba_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int);
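
/*
 * The routines below form the sgmap-backed bus_dma back end for the
 * VAX Unibus/Q-bus adapters.  A driver never calls them directly: the
 * bus_dmamap_*() calls it makes through the tag built in uba_dma_init()
 * are dispatched here, and each load allocates Unibus map registers via
 * the sgmap code so that a device limited to 18-bit (Unibus) or 22-bit
 * (Q-bus) addressing can reach buffers anywhere in host memory.  DMA-safe
 * memory allocation (the _dmamem_* entry points) is left to the generic
 * machine-dependent routines.
 */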

void
uba_dma_init(struct uba_vsoftc *sc)
{
	bus_dma_tag_t t;
	struct pte *pte;

	/*
	 * Initialize the DMA tag used for sgmap-mapped DMA.
	 */
	t = &sc->uv_dmat;
	t->_cookie = sc;
	t->_wbase = 0;
	t->_wsize = sc->uv_size;
	t->_boundary = 0;
	t->_sgmap = &sc->uv_sgmap;
	t->_dmamap_create = uba_bus_dmamap_create_sgmap;
	t->_dmamap_destroy = uba_bus_dmamap_destroy_sgmap;
	t->_dmamap_load = uba_bus_dmamap_load_sgmap;
	t->_dmamap_load_mbuf = uba_bus_dmamap_load_mbuf_sgmap;
	t->_dmamap_load_uio = uba_bus_dmamap_load_uio_sgmap;
	t->_dmamap_load_raw = uba_bus_dmamap_load_raw_sgmap;
	t->_dmamap_unload = uba_bus_dmamap_unload_sgmap;
	t->_dmamap_sync = uba_bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * Map in Unibus map registers, if not mapped in already.
	 */
	if (sc->uv_uba) {
		pte = sc->uv_uba->uba_map;
	} else {
		pte = (struct pte *)vax_map_physmem(sc->uv_addr,
		    vax_btoc(vax_btoc(sc->uv_size) * sizeof(struct pte)));
		if (pte == 0)
			panic("uba_dma_init");
	}

	/*
	 * Initialize the SGMAP.
	 */
	vax_sgmap_init(t, &sc->uv_sgmap, "uba_sgmap", 0, sc->uv_size, pte, 0);
}
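
/*
 * How this is reached (an illustrative sketch only; the real per-adapter
 * attach code differs in detail): an adapter front end fills in uv_addr
 * and uv_size (and uv_uba when the adapter registers are already mapped),
 * calls uba_dma_init(), and then hands &sc->uv_dmat to its child devices
 * as their bus_dma_tag_t, so that ordinary bus_dmamap_*() calls land in
 * the uba_bus_dmamap_*_sgmap() routines below.
 */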

/*
 * Create a UBA SGMAP-mapped DMA map.
 */
int
uba_bus_dmamap_create_sgmap(bus_dma_tag_t t, bus_size_t size, int nsegments,
	bus_size_t maxsegsz, bus_size_t boundary, int flags,
	bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	int error;

	error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
	    boundary, flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;

	if (flags & BUS_DMA_ALLOCNOW) {
		error = vax_sgmap_alloc(map, vax_round_page(size),
		    t->_sgmap, flags);
		if (error)
			uba_bus_dmamap_destroy_sgmap(t, map);
	}

	return (error);
}
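
/*
 * Driver-side view of the above (an illustrative example, not taken from
 * any particular driver; sc_dmat and sc_map are hypothetical softc
 * members): a device that wants its map registers reserved up front
 * passes BUS_DMA_ALLOCNOW, e.g.
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &sc->sc_map);
 *
 * Without BUS_DMA_ALLOCNOW the sgmap registers are not allocated until
 * the map is loaded.
 */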

/*
 * Destroy a UBA SGMAP-mapped DMA map.
 */
void
uba_bus_dmamap_destroy_sgmap(bus_dma_tag_t t, bus_dmamap_t map)
{

	if (map->_dm_flags & DMAMAP_HAS_SGMAP)
		vax_sgmap_free(map, t->_sgmap);

	_bus_dmamap_destroy(t, map);
}

/*
 * Load a UBA SGMAP-mapped DMA map with a linear buffer.
 */
int
uba_bus_dmamap_load_sgmap(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	bus_size_t buflen, struct proc *p, int flags)
{
	int error;

	error = vax_sgmap_load(t, map, buf, buflen, p, flags, t->_sgmap);

	/*
	 * XXX - Set up BDPs.
	 */

	return (error);
}
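
/*
 * For reference, the usual bus_dma(9) sequence a driver runs against this
 * back end (names here are illustrative):
 *
 *	bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... program the device with map->dm_segs[0].ds_addr, which ends
 *	    up being the Unibus-visible address set up by the sgmap ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 *
 * The sync calls end up in uba_bus_dmamap_sync() at the bottom of this
 * file, which is currently a no-op.
 */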

/*
 * Load a UBA SGMAP-mapped DMA map with an mbuf chain.
 */
int
uba_bus_dmamap_load_mbuf_sgmap(bus_dma_tag_t t, bus_dmamap_t map,
	struct mbuf *m, int flags)
{
	int error;

	error = vax_sgmap_load_mbuf(t, map, m, flags, t->_sgmap);

	return (error);
}

/*
 * Load a UBA SGMAP-mapped DMA map with a uio.
 */
int
uba_bus_dmamap_load_uio_sgmap(bus_dma_tag_t t, bus_dmamap_t map,
	struct uio *uio, int flags)
{
	int error;

	error = vax_sgmap_load_uio(t, map, uio, flags, t->_sgmap);

	return (error);
}

/*
 * Load a UBA SGMAP-mapped DMA map with raw memory.
 */
int
uba_bus_dmamap_load_raw_sgmap(bus_dma_tag_t t, bus_dmamap_t map,
	bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	int error;

	error = vax_sgmap_load_raw(t, map, segs, nsegs, size, flags,
	    t->_sgmap);

	return (error);
}

/*
 * Unload a UBA DMA map.
 */
void
uba_bus_dmamap_unload_sgmap(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * Invalidate any SGMAP page table entries used by this
	 * mapping.
	 */
	vax_sgmap_unload(t, map, t->_sgmap);

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Sync the bus map.  This is only needed if BDPs are used.
 */
void
uba_bus_dmamap_sync(bus_dma_tag_t tag, bus_dmamap_t dmam, bus_addr_t offset,
	bus_size_t len, int ops)
{

	/* Only BDP handling, but not yet. */
}
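
/*
 * The BDPs mentioned above are the buffered data paths found on the
 * larger Unibus adapters (e.g. the DW780): they stage DMA data inside
 * the adapter and have to be purged when a transfer finishes.  If BDP
 * support were ever added, that purge would be driven from this sync
 * routine; with only the direct data path in use there is nothing to do.
 */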