1 /* $NetBSD: vme_machdep.c,v 1.61 2009/11/21 04:16:51 rmind Exp $ */
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
7 * This code is derived from software contributed to The NetBSD Foundation
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: vme_machdep.c,v 1.61 2009/11/21 04:16:51 rmind Exp $");
35 #include <sys/param.h>
36 #include <sys/extent.h>
37 #include <sys/systm.h>
38 #include <sys/device.h>
39 #include <sys/malloc.h>
40 #include <sys/errno.h>
43 #include <sys/syslog.h>
45 #include <uvm/uvm_extern.h>
47 #define _SPARC_BUS_DMA_PRIVATE
48 #include <machine/bus.h>
49 #include <sparc/sparc/iommuvar.h>
50 #include <machine/autoconf.h>
51 #include <machine/oldmon.h>
52 #include <machine/cpu.h>
53 #include <machine/ctlreg.h>
55 #include <dev/vme/vmereg.h>
56 #include <dev/vme/vmevar.h>
58 #include <sparc/sparc/asm.h>
59 #include <sparc/sparc/vaddrs.h>
60 #include <sparc/sparc/cpuvar.h>
61 #include <sparc/dev/vmereg.h>
63 struct sparcvme_softc
{
64 struct device sc_dev
; /* base device */
65 bus_space_tag_t sc_bustag
;
66 bus_dma_tag_t sc_dmatag
;
67 struct vmebusreg
*sc_reg
; /* VME control registers */
68 struct vmebusvec
*sc_vec
; /* VME interrupt vector */
69 struct rom_range
*sc_range
; /* ROM range property */
71 volatile uint32_t *sc_ioctags
; /* VME IO-cache tag registers */
72 volatile uint32_t *sc_iocflush
;/* VME IO-cache flush registers */
73 int (*sc_vmeintr
)(void *);
75 struct sparcvme_softc
*sparcvme_sc
;/*XXX*/
77 /* autoconfiguration driver */
78 static int vmematch_iommu(device_t
, cfdata_t
, void *);
79 static void vmeattach_iommu(device_t
, device_t
, void *);
80 static int vmematch_mainbus(device_t
, cfdata_t
, void *);
81 static void vmeattach_mainbus(device_t
, device_t
, void *);
86 int vmeintr4m(void *);
87 static int sparc_vme_error(void);
91 static int sparc_vme_probe(void *, vme_addr_t
, vme_size_t
,
92 vme_am_t
, vme_datasize_t
,
94 bus_space_tag_t
, bus_space_handle_t
),
96 static int sparc_vme_map(void *, vme_addr_t
, vme_size_t
, vme_am_t
,
97 vme_datasize_t
, vme_swap_t
,
98 bus_space_tag_t
*, bus_space_handle_t
*,
100 static void sparc_vme_unmap(void *, vme_mapresc_t
);
101 static int sparc_vme_intr_map(void *, int, int, vme_intr_handle_t
*);
102 static const struct evcnt
*sparc_vme_intr_evcnt(void *, vme_intr_handle_t
);
103 static void * sparc_vme_intr_establish(void *, vme_intr_handle_t
, int,
104 int (*)(void *), void *);
105 static void sparc_vme_intr_disestablish(void *, void *);
107 static int vmebus_translate(struct sparcvme_softc
*, vme_am_t
,
108 vme_addr_t
, bus_addr_t
*);
111 static void sparc_vme_iommu_barrier(bus_space_tag_t
, bus_space_handle_t
,
112 bus_size_t
, bus_size_t
, int);
120 #if defined(SUN4) || defined(SUN4M)
121 static void sparc_vct_dmamap_destroy(void *, bus_dmamap_t
);
125 static int sparc_vct4_dmamap_create(void *, vme_size_t
, vme_am_t
,
126 vme_datasize_t
, vme_swap_t
, int, vme_size_t
, vme_addr_t
,
127 int, bus_dmamap_t
*);
128 static int sparc_vme4_dmamap_load(bus_dma_tag_t
, bus_dmamap_t
, void *,
129 bus_size_t
, struct proc
*, int);
130 static void sparc_vme4_dmamap_unload(bus_dma_tag_t
, bus_dmamap_t
);
131 static void sparc_vme4_dmamap_sync(bus_dma_tag_t
, bus_dmamap_t
,
132 bus_addr_t
, bus_size_t
, int);
136 static int sparc_vct_iommu_dmamap_create(void *, vme_size_t
, vme_am_t
,
137 vme_datasize_t
, vme_swap_t
, int, vme_size_t
, vme_addr_t
,
138 int, bus_dmamap_t
*);
139 static int sparc_vme_iommu_dmamap_create(bus_dma_tag_t
, bus_size_t
,
140 int, bus_size_t
, bus_size_t
, int, bus_dmamap_t
*);
142 static int sparc_vme_iommu_dmamap_load(bus_dma_tag_t
, bus_dmamap_t
,
143 void *, bus_size_t
, struct proc
*, int);
144 static void sparc_vme_iommu_dmamap_unload(bus_dma_tag_t
, bus_dmamap_t
);
145 static void sparc_vme_iommu_dmamap_sync(bus_dma_tag_t
, bus_dmamap_t
,
146 bus_addr_t
, bus_size_t
, int);
149 #if defined(SUN4) || defined(SUN4M)
150 static int sparc_vme_dmamem_map(bus_dma_tag_t
, bus_dma_segment_t
*,
151 int, size_t, void **, int);
155 static void sparc_vme_dmamap_destroy(bus_dma_tag_t
, bus_dmamap_t
);
156 static void sparc_vme_dmamem_unmap(bus_dma_tag_t
, void *, size_t);
157 static paddr_t
sparc_vme_dmamem_mmap(bus_dma_tag_t
,
158 bus_dma_segment_t
*, int, off_t
, int, int);
161 int sparc_vme_mmap_cookie(vme_addr_t
, vme_am_t
, bus_space_handle_t
*);
163 CFATTACH_DECL(vme_mainbus
, sizeof(struct sparcvme_softc
),
164 vmematch_mainbus
, vmeattach_mainbus
, NULL
, NULL
);
166 CFATTACH_DECL(vme_iommu
, sizeof(struct sparcvme_softc
),
167 vmematch_iommu
, vmeattach_iommu
, NULL
, NULL
);
169 static int vme_attached
;
171 int (*vmeerr_handler
)(void);
173 #define VMEMOD_D32 0x40 /* ??? */
175 /* If the PROM does not provide the `ranges' property, we make up our own */
176 struct rom_range vmebus_translations
[] = {
177 #define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
178 { VME_AM_A16
|_DS
, 0, PMAP_VME16
, 0xffff0000, 0 },
179 { VME_AM_A24
|_DS
, 0, PMAP_VME16
, 0xff000000, 0 },
180 { VME_AM_A32
|_DS
, 0, PMAP_VME16
, 0x00000000, 0 },
181 { VME_AM_A16
|VMEMOD_D32
|_DS
, 0, PMAP_VME32
, 0xffff0000, 0 },
182 { VME_AM_A24
|VMEMOD_D32
|_DS
, 0, PMAP_VME32
, 0xff000000, 0 },
183 { VME_AM_A32
|VMEMOD_D32
|_DS
, 0, PMAP_VME32
, 0x00000000, 0 }
188 * The VME bus logic on sun4 machines maps DMA requests in the first MB
189 * of VME space to the last MB of DVMA space. `vme_dvmamap' is used
190 * for DVMA space allocations. The DMA addresses returned by
191 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE.
193 struct extent
*vme_dvmamap
;
196 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit
197 * VME space to the last 8MB of DVMA space and the first 1MB of
198 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space
199 * (thus 24-bit VME space overlaps the first 1MB of 32-bit space).
200 * The following constants define subregions in the IOMMU DVMA map
201 * for VME DVMA allocations. The DMA addresses returned by
202 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE.
204 #define VME_IOMMU_DVMA_BASE 0xff800000
205 #define VME_IOMMU_DVMA_AM24_BASE VME_IOMMU_DVMA_BASE
206 #define VME_IOMMU_DVMA_AM24_END 0xff900000
207 #define VME_IOMMU_DVMA_AM32_BASE VME_IOMMU_DVMA_BASE
208 #define VME_IOMMU_DVMA_AM32_END IOMMU_DVMA_END
210 struct vme_chipset_tag sparc_vme_chipset_tag
= {
216 sparc_vme_intr_evcnt
,
217 sparc_vme_intr_establish
,
218 sparc_vme_intr_disestablish
,
219 0, 0, 0 /* bus specific DMA stuff */
224 struct sparc_bus_dma_tag sparc_vme4_dma_tag
= {
228 sparc_vme4_dmamap_load
,
229 _bus_dmamap_load_mbuf
,
230 _bus_dmamap_load_uio
,
231 _bus_dmamap_load_raw
,
232 sparc_vme4_dmamap_unload
,
233 sparc_vme4_dmamap_sync
,
237 sparc_vme_dmamem_map
,
244 struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag
= {
246 sparc_vme_iommu_dmamap_create
,
248 sparc_vme_iommu_dmamap_load
,
249 _bus_dmamap_load_mbuf
,
250 _bus_dmamap_load_uio
,
251 _bus_dmamap_load_raw
,
252 sparc_vme_iommu_dmamap_unload
,
253 sparc_vme_iommu_dmamap_sync
,
257 sparc_vme_dmamem_map
,
265 vmematch_mainbus(device_t parent
, cfdata_t cf
, void *aux
)
267 struct mainbus_attach_args
*ma
= aux
;
269 if (!CPU_ISSUN4
|| vme_attached
)
272 return (strcmp("vme", ma
->ma_name
) == 0);
276 vmematch_iommu(device_t parent
, cfdata_t cf
, void *aux
)
278 struct iommu_attach_args
*ia
= aux
;
283 return (strcmp("vme", ia
->iom_name
) == 0);
288 vmeattach_mainbus(device_t parent
, device_t self
, void *aux
)
291 struct mainbus_attach_args
*ma
= aux
;
292 struct sparcvme_softc
*sc
= device_private(self
);
293 struct vmebus_attach_args vba
;
297 sc
->sc_bustag
= ma
->ma_bustag
;
298 sc
->sc_dmatag
= ma
->ma_dmatag
;
300 /* VME interrupt entry point */
301 sc
->sc_vmeintr
= vmeintr4
;
303 /*XXX*/ sparc_vme_chipset_tag
.cookie
= sc
;
304 /*XXX*/ sparc_vme_chipset_tag
.vct_dmamap_create
= sparc_vct4_dmamap_create
;
305 /*XXX*/ sparc_vme_chipset_tag
.vct_dmamap_destroy
= sparc_vct_dmamap_destroy
;
306 /*XXX*/ sparc_vme4_dma_tag
._cookie
= sc
;
308 vba
.va_vct
= &sparc_vme_chipset_tag
;
309 vba
.va_bdt
= &sparc_vme4_dma_tag
;
310 vba
.va_slaveconfig
= 0;
312 /* Fall back to our own `range' construction */
313 sc
->sc_range
= vmebus_translations
;
315 sizeof(vmebus_translations
)/sizeof(vmebus_translations
[0]);
317 vme_dvmamap
= extent_create("vmedvma", VME4_DVMA_BASE
, VME4_DVMA_END
,
318 M_DEVBUF
, 0, 0, EX_NOWAIT
);
319 if (vme_dvmamap
== NULL
)
320 panic("vme: unable to allocate DVMA map");
323 (void)config_found(self
, &vba
, 0);
331 vmeattach_iommu(struct device
*parent
, struct device
*self
, void *aux
)
334 struct sparcvme_softc
*sc
= device_private(self
);
335 struct iommu_attach_args
*ia
= aux
;
336 struct vmebus_attach_args vba
;
337 bus_space_handle_t bh
;
341 sc
->sc_bustag
= ia
->iom_bustag
;
342 sc
->sc_dmatag
= ia
->iom_dmatag
;
344 /* VME interrupt entry point */
345 sc
->sc_vmeintr
= vmeintr4m
;
347 /*XXX*/ sparc_vme_chipset_tag
.cookie
= sc
;
348 /*XXX*/ sparc_vme_chipset_tag
.vct_dmamap_create
= sparc_vct_iommu_dmamap_create
;
349 /*XXX*/ sparc_vme_chipset_tag
.vct_dmamap_destroy
= sparc_vct_dmamap_destroy
;
350 /*XXX*/ sparc_vme_iommu_dma_tag
._cookie
= sc
;
352 vba
.va_vct
= &sparc_vme_chipset_tag
;
353 vba
.va_bdt
= &sparc_vme_iommu_dma_tag
;
354 vba
.va_slaveconfig
= 0;
359 * Map VME control space
361 if (ia
->iom_nreg
< 2) {
362 printf("%s: only %d register sets\n", device_xname(self
),
367 if (bus_space_map(ia
->iom_bustag
,
368 (bus_addr_t
) BUS_ADDR(ia
->iom_reg
[0].oa_space
,
369 ia
->iom_reg
[0].oa_base
),
370 (bus_size_t
)ia
->iom_reg
[0].oa_size
,
371 BUS_SPACE_MAP_LINEAR
,
373 panic("%s: can't map vmebusreg", device_xname(self
));
375 sc
->sc_reg
= (struct vmebusreg
*)bh
;
377 if (bus_space_map(ia
->iom_bustag
,
378 (bus_addr_t
) BUS_ADDR(ia
->iom_reg
[1].oa_space
,
379 ia
->iom_reg
[1].oa_base
),
380 (bus_size_t
)ia
->iom_reg
[1].oa_size
,
381 BUS_SPACE_MAP_LINEAR
,
383 panic("%s: can't map vmebusvec", device_xname(self
));
385 sc
->sc_vec
= (struct vmebusvec
*)bh
;
388 * Map VME IO cache tags and flush control.
390 if (bus_space_map(ia
->iom_bustag
,
391 (bus_addr_t
) BUS_ADDR(
392 ia
->iom_reg
[1].oa_space
,
393 ia
->iom_reg
[1].oa_base
+ VME_IOC_TAGOFFSET
),
395 BUS_SPACE_MAP_LINEAR
,
397 panic("%s: can't map IOC tags", device_xname(self
));
399 sc
->sc_ioctags
= (uint32_t *)bh
;
401 if (bus_space_map(ia
->iom_bustag
,
402 (bus_addr_t
) BUS_ADDR(
403 ia
->iom_reg
[1].oa_space
,
404 ia
->iom_reg
[1].oa_base
+ VME_IOC_FLUSHOFFSET
),
406 BUS_SPACE_MAP_LINEAR
,
408 panic("%s: can't map IOC flush registers", device_xname(self
));
410 sc
->sc_iocflush
= (uint32_t *)bh
;
413 * Get "range" property.
415 if (prom_getprop(node
, "ranges", sizeof(struct rom_range
),
416 &sc
->sc_nrange
, &sc
->sc_range
) != 0) {
417 panic("%s: can't get ranges property", device_xname(self
));
421 vmeerr_handler
= sparc_vme_error
;
424 * Invalidate all IO-cache entries.
426 for (cline
= VME_IOC_SIZE
/VME_IOC_LINESZ
; cline
> 0;) {
427 sc
->sc_ioctags
[--cline
] = 0;
430 /* Enable IO-cache */
431 sc
->sc_reg
->vmebus_cr
|= VMEBUS_CR_C
;
433 printf(": version 0x%x\n",
434 sc
->sc_reg
->vmebus_cr
& VMEBUS_CR_IMPL
);
436 (void)config_found(self
, &vba
, 0);
442 sparc_vme_error(void)
444 struct sparcvme_softc
*sc
= sparcvme_sc
;
448 afsr
= sc
->sc_reg
->vmebus_afsr
;
449 afpa
= sc
->sc_reg
->vmebus_afar
;
450 snprintb(bits
, sizeof(bits
), VMEBUS_AFSR_BITS
, afsr
);
451 printf("VME error:\n\tAFSR %s\n", bits
);
452 printf("\taddress: 0x%x%x\n", afsr
, afpa
);
458 vmebus_translate(struct sparcvme_softc
*sc
, vme_am_t mod
, vme_addr_t addr
,
463 for (i
= 0; i
< sc
->sc_nrange
; i
++) {
464 struct rom_range
*rp
= &sc
->sc_range
[i
];
466 if (rp
->cspace
!= mod
)
469 /* We've found the connection to the parent bus */
470 *bap
= BUS_ADDR(rp
->pspace
, rp
->poffset
+ addr
);
476 struct vmeprobe_myarg
{
477 int (*cb
)(void *, bus_space_tag_t
, bus_space_handle_t
);
480 int res
; /* backwards */
483 static int vmeprobe_mycb(void *, void *);
486 vmeprobe_mycb(void *bh
, void *arg
)
488 struct vmeprobe_myarg
*a
= arg
;
490 a
->res
= (*a
->cb
)(a
->cbarg
, a
->tag
, (bus_space_handle_t
)bh
);
495 sparc_vme_probe(void *cookie
, vme_addr_t addr
, vme_size_t len
, vme_am_t mod
,
496 vme_datasize_t datasize
,
497 int (*callback
)(void *, bus_space_tag_t
, bus_space_handle_t
),
500 struct sparcvme_softc
*sc
= cookie
;
503 struct vmeprobe_myarg myarg
;
506 if (vmebus_translate(sc
, mod
, addr
, &paddr
) != 0)
509 size
= (datasize
== VME_D8
? 1 : (datasize
== VME_D16
? 2 : 4));
514 myarg
.tag
= sc
->sc_bustag
;
516 res
= bus_space_probe(sc
->sc_bustag
, paddr
, size
, 0,
517 0, vmeprobe_mycb
, &myarg
);
518 return (res
? 0 : (myarg
.res
? myarg
.res
: EIO
));
521 for (i
= 0; i
< len
/ size
; i
++) {
523 res
= bus_space_probe(sc
->sc_bustag
, paddr
, size
, 0,
533 sparc_vme_map(void *cookie
, vme_addr_t addr
, vme_size_t size
, vme_am_t mod
,
534 vme_datasize_t datasize
, vme_swap_t swap
,
535 bus_space_tag_t
*tp
, bus_space_handle_t
*hp
, vme_mapresc_t
*rp
)
537 struct sparcvme_softc
*sc
= cookie
;
541 error
= vmebus_translate(sc
, mod
, addr
, &paddr
);
546 return (bus_space_map(sc
->sc_bustag
, paddr
, size
, 0, hp
));
550 sparc_vme_mmap_cookie(vme_addr_t addr
, vme_am_t mod
, bus_space_handle_t
*hp
)
552 struct sparcvme_softc
*sc
= sparcvme_sc
;
556 error
= vmebus_translate(sc
, mod
, addr
, &paddr
);
560 return (bus_space_mmap(sc
->sc_bustag
, paddr
, 0,
561 0/*prot is ignored*/, 0));
567 sparc_vme_iommu_barrier(bus_space_tag_t t
, bus_space_handle_t h
,
568 bus_size_t offset
, bus_size_t size
.
571 struct vmebusreg
*vbp
= t
->cookie
;
573 /* Read async fault status to flush write-buffers */
574 (*(volatile int *)&vbp
->vmebus_afsr
);
/*
 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
 * Index 0 is unused; VME IPLs 1-7 map to the odd-numbered device PILs.
 * NOTE(review): entry values reconstructed — confirm against original.
 */
static int vme_ipl_to_pil[] = {
	0,
	2,
	3,
	5,
	7,
	9,
	11,
	13
};
597 * All VME device interrupts go through vmeintr(). This function reads
598 * the VME vector from the bus, then dispatches the device interrupt
599 * handler. All handlers for devices that map to the same Processor
600 * Interrupt Level (according to the table above) are on a linked list
601 * of `sparc_vme_intr_handle' structures. The head of which is passed
602 * down as the argument to `vmeintr(void *arg)'.
604 struct sparc_vme_intr_handle
{
606 struct sparc_vme_intr_handle
*next
;
607 int vec
; /* VME interrupt vector */
608 int pri
; /* VME interrupt priority */
609 struct sparcvme_softc
*sc
;/*XXX*/
616 struct sparc_vme_intr_handle
*ihp
= (vme_intr_handle_t
)arg
;
620 level
= (ihp
->pri
<< 1) | 1;
622 vec
= ldcontrolb((void *)(AC_VMEINTVEC
| level
));
627 * This seems to happen only with the i82586 based
630 printf("vme: spurious interrupt at VME level %d\n", ihp
->pri
);
632 return (1); /* XXX - pretend we handled it, for now */
635 for (; ihp
; ihp
= ihp
->next
)
636 if (ihp
->vec
== vec
&& ihp
->ih
.ih_fun
) {
637 splx(ihp
->ih
.ih_classipl
);
638 rv
|= (ihp
->ih
.ih_fun
)(ihp
->ih
.ih_arg
);
649 struct sparc_vme_intr_handle
*ihp
= (vme_intr_handle_t
)arg
;
653 level
= (ihp
->pri
<< 1) | 1;
658 /* Flush VME <=> Sbus write buffers */
659 (*(volatile int *)&ihp
->sc
->sc_reg
->vmebus_afsr
);
661 pending
= *((int*)ICR_SI_PEND
);
662 if ((pending
& SINTR_VME(ihp
->pri
)) == 0) {
663 printf("vmeintr: non pending at pri %x(p 0x%x)\n",
669 /* Why gives this a bus timeout sometimes? */
670 vec
= ihp
->sc
->sc_vec
->vmebusvec
[level
];
672 /* so, arrange to catch the fault... */
674 extern int fkbyte(volatile char *, struct pcb
*);
675 volatile char *addr
= &ihp
->sc
->sc_vec
->vmebusvec
[level
];
682 xpcb
= lwp_getpcb(curlwp
);
683 saveonfault
= (u_long
)xpcb
->pcb_onfault
;
684 vec
= fkbyte(addr
, xpcb
);
685 xpcb
->pcb_onfault
= (void *)saveonfault
;
694 * This seems to happen only with the i82586 based
697 printf("vme: spurious interrupt at VME level %d\n", ihp
->pri
);
698 printf(" ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n",
699 *((int*)ICR_SI_PEND
),
700 ihp
->sc
->sc_reg
->vmebus_afsr
,
701 ihp
->sc
->sc_reg
->vmebus_afar
);
703 return (1); /* XXX - pretend we handled it, for now */
706 for (; ihp
; ihp
= ihp
->next
)
707 if (ihp
->vec
== vec
&& ihp
->ih
.ih_fun
) {
708 splx(ihp
->ih
.ih_classipl
);
709 rv
|= (ihp
->ih
.ih_fun
)(ihp
->ih
.ih_arg
);
717 sparc_vme_intr_map(void *cookie
, int level
, int vec
,
718 vme_intr_handle_t
*ihp
)
720 struct sparc_vme_intr_handle
*ih
;
722 ih
= (vme_intr_handle_t
)
723 malloc(sizeof(struct sparc_vme_intr_handle
), M_DEVBUF
, M_NOWAIT
);
726 ih
->sc
= cookie
;/*XXX*/
731 static const struct evcnt
*
732 sparc_vme_intr_evcnt(void *cookie
, vme_intr_handle_t vih
)
735 /* XXX for now, no evcnt parent reported */
740 sparc_vme_intr_establish(void *cookie
, vme_intr_handle_t vih
, int level
,
741 int (*func
)(void *), void *arg
)
743 struct sparcvme_softc
*sc
= cookie
;
744 struct sparc_vme_intr_handle
*svih
=
745 (struct sparc_vme_intr_handle
*)vih
;
749 /* Translate VME priority to processor IPL */
750 pil
= vme_ipl_to_pil
[svih
->pri
];
753 panic("vme_intr_establish: class lvl (%d) < pil (%d)\n",
756 svih
->ih
.ih_fun
= func
;
757 svih
->ih
.ih_arg
= arg
;
758 svih
->ih
.ih_classipl
= level
; /* note: used slightly differently
759 than in intr.c (no shift) */
762 /* ensure the interrupt subsystem will call us at this level */
763 for (ih
= intrhand
[pil
]; ih
!= NULL
; ih
= ih
->ih_next
)
764 if (ih
->ih_fun
== sc
->sc_vmeintr
)
768 ih
= malloc(sizeof(struct intrhand
), M_DEVBUF
, M_NOWAIT
|M_ZERO
);
771 ih
->ih_fun
= sc
->sc_vmeintr
;
773 intr_establish(pil
, 0, ih
, NULL
, false);
775 svih
->next
= (vme_intr_handle_t
)ih
->ih_arg
;
782 sparc_vme_unmap(void *cookie
, vme_mapresc_t resc
)
785 /* Not implemented */
786 panic("sparc_vme_unmap");
/* Remove a VME interrupt handler; never implemented on this port. */
static void
sparc_vme_intr_disestablish(void *cookie, void *a)
{

	/* Not implemented */
	panic("sparc_vme_intr_disestablish");
}
803 #if defined(SUN4) || defined(SUN4M)
805 sparc_vct_dmamap_destroy(void *cookie
, bus_dmamap_t map
)
807 struct sparcvme_softc
*sc
= cookie
;
809 bus_dmamap_destroy(sc
->sc_dmatag
, map
);
815 sparc_vct4_dmamap_create(void *cookie
, vme_size_t size
, vme_am_t am
,
816 vme_datasize_t datasize
, vme_swap_t swap
,
817 int nsegments
, vme_size_t maxsegsz
,
818 vme_addr_t boundary
, int flags
,
821 struct sparcvme_softc
*sc
= cookie
;
823 /* Allocate a base map through parent bus ops */
824 return (bus_dmamap_create(sc
->sc_dmatag
, size
, nsegments
, maxsegsz
,
825 boundary
, flags
, dmamp
));
829 sparc_vme4_dmamap_load(bus_dma_tag_t t
, bus_dmamap_t map
,
830 void *buf
, bus_size_t buflen
,
831 struct proc
*p
, int flags
)
838 int pagesz
= PAGE_SIZE
;
841 cache_flush(buf
, buflen
); /* XXX - move to bus_dma_sync */
844 voff
= va
& (pagesz
- 1);
848 * Allocate an integral number of pages from DVMA space
849 * covering the passed buffer.
851 sgsize
= (buflen
+ voff
+ pagesz
- 1) & -pagesz
;
852 error
= extent_alloc(vme_dvmamap
, sgsize
, pagesz
,
854 (flags
& BUS_DMA_NOWAIT
) == 0
860 dva
= (bus_addr_t
)ldva
;
862 map
->dm_mapsize
= buflen
;
864 /* Adjust DVMA address to VME view */
865 map
->dm_segs
[0].ds_addr
= dva
+ voff
- VME4_DVMA_BASE
;
866 map
->dm_segs
[0].ds_len
= buflen
;
867 map
->dm_segs
[0]._ds_sgsize
= sgsize
;
869 pmap
= (p
== NULL
) ? pmap_kernel() : p
->p_vmspace
->vm_map
.pmap
;
871 for (; sgsize
!= 0; ) {
874 * Get the physical address for this page.
876 (void) pmap_extract(pmap
, va
, &pa
);
882 pmap_enter(pmap_kernel(), dva
,
884 VM_PROT_READ
|VM_PROT_WRITE
, PMAP_WIRED
);
890 pmap_update(pmap_kernel());
896 sparc_vme4_dmamap_unload(bus_dma_tag_t t
, bus_dmamap_t map
)
898 bus_dma_segment_t
*segs
= map
->dm_segs
;
899 int nsegs
= map
->dm_nsegs
;
904 for (i
= 0; i
< nsegs
; i
++) {
905 /* Go from VME to CPU view */
906 dva
= segs
[i
].ds_addr
+ VME4_DVMA_BASE
;
908 len
= segs
[i
]._ds_sgsize
;
910 /* Remove double-mapping in DVMA space */
911 pmap_remove(pmap_kernel(), dva
, dva
+ len
);
913 /* Release DVMA space */
915 error
= extent_free(vme_dvmamap
, dva
, len
, EX_NOWAIT
);
918 printf("warning: %ld of DVMA space lost\n", len
);
920 pmap_update(pmap_kernel());
922 /* Mark the mappings as invalid. */
928 sparc_vme4_dmamap_sync(bus_dma_tag_t t
, bus_dmamap_t map
,
929 bus_addr_t offset
, bus_size_t len
, int ops
)
933 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
934 * Currently the cache is flushed in bus_dma_load()...
941 sparc_vme_iommu_dmamap_create(bus_dma_tag_t t
, bus_size_t size
,
942 int nsegments
, bus_size_t maxsegsz
,
943 bus_size_t boundary
, int flags
,
947 printf("sparc_vme_dmamap_create: please use `vme_dmamap_create'\n");
952 sparc_vct_iommu_dmamap_create(void *cookie
, vme_size_t size
, vme_am_t am
,
953 vme_datasize_t datasize
, vme_swap_t swap
,
954 int nsegments
, vme_size_t maxsegsz
,
955 vme_addr_t boundary
, int flags
,
958 struct sparcvme_softc
*sc
= cookie
;
962 /* Allocate a base map through parent bus ops */
963 error
= bus_dmamap_create(sc
->sc_dmatag
, size
, nsegments
, maxsegsz
,
964 boundary
, flags
, &map
);
969 * Each I/O cache line maps to a 8K section of VME DVMA space, so
970 * we must ensure that DVMA alloctions are always 8K aligned.
972 map
->_dm_align
= VME_IOC_PAGESZ
;
974 /* Set map region based on Address Modifier */
975 switch ((am
& VME_AM_ADRSIZEMASK
)) {
978 /* 1 MB of DVMA space */
979 map
->_dm_ex_start
= VME_IOMMU_DVMA_AM24_BASE
;
980 map
->_dm_ex_end
= VME_IOMMU_DVMA_AM24_END
;
983 /* 8 MB of DVMA space */
984 map
->_dm_ex_start
= VME_IOMMU_DVMA_AM32_BASE
;
985 map
->_dm_ex_end
= VME_IOMMU_DVMA_AM32_END
;
994 sparc_vme_iommu_dmamap_load(bus_dma_tag_t t
, bus_dmamap_t map
,
995 void *buf
, bus_size_t buflen
,
996 struct proc
*p
, int flags
)
998 struct sparcvme_softc
*sc
= t
->_cookie
;
999 volatile uint32_t *ioctags
;
1002 /* Round request to a multiple of the I/O cache size */
1003 buflen
= (buflen
+ VME_IOC_PAGESZ
- 1) & -VME_IOC_PAGESZ
;
1004 error
= bus_dmamap_load(sc
->sc_dmatag
, map
, buf
, buflen
, p
, flags
);
1008 /* Allocate I/O cache entries for this range */
1009 ioctags
= sc
->sc_ioctags
+ VME_IOC_LINE(map
->dm_segs
[0].ds_addr
);
1010 while (buflen
> 0) {
1011 *ioctags
= VME_IOC_IC
| VME_IOC_W
;
1012 ioctags
+= VME_IOC_LINESZ
/sizeof(*ioctags
);
1013 buflen
-= VME_IOC_PAGESZ
;
1017 * Adjust DVMA address to VME view.
1018 * Note: the DVMA base address is the same for all
1019 * VME address spaces.
1021 map
->dm_segs
[0].ds_addr
-= VME_IOMMU_DVMA_BASE
;
1027 sparc_vme_iommu_dmamap_unload(bus_dma_tag_t t
, bus_dmamap_t map
)
1029 struct sparcvme_softc
*sc
= t
->_cookie
;
1030 volatile uint32_t *flushregs
;
1033 /* Go from VME to CPU view */
1034 map
->dm_segs
[0].ds_addr
+= VME_IOMMU_DVMA_BASE
;
1036 /* Flush VME I/O cache */
1037 len
= map
->dm_segs
[0]._ds_sgsize
;
1038 flushregs
= sc
->sc_iocflush
+ VME_IOC_LINE(map
->dm_segs
[0].ds_addr
);
1041 flushregs
+= VME_IOC_LINESZ
/sizeof(*flushregs
);
1042 len
-= VME_IOC_PAGESZ
;
1046 * Start a read from `tag space' which will not complete until
1047 * all cache flushes have finished
1051 bus_dmamap_unload(sc
->sc_dmatag
, map
);
1055 sparc_vme_iommu_dmamap_sync(bus_dma_tag_t t
, bus_dmamap_t map
,
1056 bus_addr_t offset
, bus_size_t len
, int ops
)
1060 * XXX Should perform cache flushes as necessary.
1065 #if defined(SUN4) || defined(SUN4M)
1067 sparc_vme_dmamem_map(bus_dma_tag_t t
, bus_dma_segment_t
*segs
, int nsegs
,
1068 size_t size
, void **kvap
, int flags
)
1070 struct sparcvme_softc
*sc
= t
->_cookie
;
1072 return (bus_dmamem_map(sc
->sc_dmatag
, segs
, nsegs
, size
, kvap
, flags
));
1074 #endif /* SUN4 || SUN4M */