/*	$NetBSD: astro.c,v 1.7 2009/11/29 10:33:56 skrll Exp $	*/

/*	$OpenBSD: astro.c,v 1.8 2007/10/06 23:50:54 krw Exp $	*/

/*
 * Copyright (c) 2007 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/reboot.h>

#include <uvm/uvm_extern.h>

#include <machine/iomod.h>
#include <machine/autoconf.h>
#include <machine/pdc.h>
#include <machine/endian.h>

#include <hp700/dev/cpudevs.h>
#include <hp700/hp700/machdep.h>
	uint8_t		resv1[0x0300 - 0x0010];
	uint64_t	lmmio_direct0_base;
	uint64_t	lmmio_direct0_mask;
	uint64_t	lmmio_direct0_route;
	uint64_t	lmmio_direct1_base;
	uint64_t	lmmio_direct1_mask;
	uint64_t	lmmio_direct1_route;
	uint64_t	lmmio_direct2_base;
	uint64_t	lmmio_direct2_mask;
	uint64_t	lmmio_direct2_route;
	uint64_t	lmmio_direct3_base;
	uint64_t	lmmio_direct3_mask;
	uint64_t	lmmio_direct3_route;
	uint64_t	lmmio_dist_base;
	uint64_t	lmmio_dist_mask;
	uint64_t	lmmio_dist_route;
	uint64_t	gmmio_dist_base;
	uint64_t	gmmio_dist_mask;
	uint64_t	gmmio_dist_route;
	uint64_t	ios_dist_base;
	uint64_t	ios_dist_mask;
	uint64_t	ios_dist_route;
	uint8_t		resv2[0x03c0 - 0x03a8];
	uint64_t	ios_direct_base;
	uint64_t	ios_direct_mask;
	uint64_t	ios_direct_route;
	uint8_t		resv3[0x22000 - 0x03d8];
	uint8_t		resv4[0x22040 - 0x22010];
	uint8_t		resv5[0x22050 - 0x22048];
	uint8_t		resv6[0x22200 - 0x22058];
	uint64_t	rope0_control;
	uint64_t	rope1_control;
	uint64_t	rope2_control;
	uint64_t	rope3_control;
	uint64_t	rope4_control;
	uint64_t	rope5_control;
	uint64_t	rope6_control;
	uint64_t	rope7_control;
	uint8_t		resv7[0x22300 - 0x22240];
	uint64_t	tlb_pdir_base;
#define ASTRO_IOC_CTRL_TE	0x0001	/* TOC Enable */
#define ASTRO_IOC_CTRL_CE	0x0002	/* Coalesce Enable */
#define ASTRO_IOC_CTRL_DE	0x0004	/* Dillon Enable */
#define ASTRO_IOC_CTRL_IE	0x0008	/* IOS Enable */
#define ASTRO_IOC_CTRL_OS	0x0010	/* Outbound Synchronous */
#define ASTRO_IOC_CTRL_IS	0x0020	/* Inbound Synchronous */
#define ASTRO_IOC_CTRL_RC	0x0040	/* Read Current Enable */
#define ASTRO_IOC_CTRL_L0	0x0080	/* 0-length Read Enable */
#define ASTRO_IOC_CTRL_RM	0x0100	/* Real Mode */
#define ASTRO_IOC_CTRL_NC	0x0200	/* Non-coherent Mode */
#define ASTRO_IOC_CTRL_ID	0x0400	/* Interrupt Disable */
#define ASTRO_IOC_CTRL_D4	0x0800	/* Disable 4-byte Coalescing */
#define ASTRO_IOC_CTRL_CC	0x1000	/* Increase Coalescing counter value */
#define ASTRO_IOC_CTRL_DD	0x2000	/* Disable distr. range coalescing */
#define ASTRO_IOC_CTRL_DC	0x4000	/* Disable the coalescing counter */
#define IOTTE_V		0x8000000000000000LL	/* Entry valid */
#define IOTTE_PAMASK	0x000000fffffff000LL
#define IOTTE_CI	0x00000000000000ffLL	/* Coherent index */
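
/*
 * As assembled by iommu_enter() below, an IOTTE carries its valid bit in
 * bit 63, the physical page frame in the IOTTE_PAMASK bits and the
 * coherence index derived from lci() in the low byte.
 */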
struct astro_softc {
	device_t sc_dv;

	bus_dma_tag_t sc_dmat;
	struct astro_regs volatile *sc_regs;
	uint64_t *sc_pdir;

	char sc_dvmamapname[20];
	struct extent *sc_dvmamap;
	struct hppa_bus_dma_tag sc_dmatag;
};
/*
 * per-map DVMA page table
 */
struct iommu_page_entry {
	SPLAY_ENTRY(iommu_page_entry) ipe_node;
	paddr_t	ipe_pa;
	vaddr_t	ipe_va;
	bus_addr_t ipe_dva;
};

struct iommu_page_map {
	SPLAY_HEAD(iommu_page_tree, iommu_page_entry) ipm_tree;
	int ipm_maxpage;	/* Size of allocated page map */
	int ipm_pagecnt;	/* Number of entries in use */
	struct iommu_page_entry	ipm_map[1];
};
/*
 * per-map IOMMU state
 */
struct iommu_map_state {
	struct astro_softc *ims_sc;
	bus_addr_t ims_dvmastart;
	bus_size_t ims_dvmasize;
	struct iommu_page_map ims_map;	/* map must be last (array at end) */
};
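
/*
 * One iommu_map_state hangs off each DMA map through map->_dm_cookie;
 * iommu_iomap_create() sizes the trailing ipm_map[] array to hold one
 * entry per DVMA page the map can cover.
 */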
int	astro_match(device_t, cfdata_t, void *);
void	astro_attach(device_t, device_t, void *);
static void astro_callback(device_t self, struct confargs *ca);

CFATTACH_DECL_NEW(astro, sizeof(struct astro_softc),
    astro_match, astro_attach, NULL, NULL);

extern struct cfdriver astro_cd;
int	iommu_dvmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t,
	    int, bus_dmamap_t *);
void	iommu_dvmamap_destroy(void *, bus_dmamap_t);
int	iommu_dvmamap_load(void *, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int);
int	iommu_dvmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
int	iommu_dvmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
int	iommu_dvmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *,
	    int, bus_size_t, int);
void	iommu_dvmamap_unload(void *, bus_dmamap_t);
void	iommu_dvmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int	iommu_dvmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
	    bus_dma_segment_t *, int, int *, int);
void	iommu_dvmamem_free(void *, bus_dma_segment_t *, int);
int	iommu_dvmamem_map(void *, bus_dma_segment_t *, int, size_t,
	    void **, int);
void	iommu_dvmamem_unmap(void *, void *, size_t);
paddr_t	iommu_dvmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);

void	iommu_enter(struct astro_softc *, bus_addr_t, paddr_t, vaddr_t, int);
void	iommu_remove(struct astro_softc *, bus_addr_t);

struct iommu_map_state *iommu_iomap_create(int);
void	iommu_iomap_destroy(struct iommu_map_state *);
int	iommu_iomap_insert_page(struct iommu_map_state *, vaddr_t, paddr_t);
bus_addr_t iommu_iomap_translate(struct iommu_map_state *, paddr_t);
void	iommu_iomap_clear_pages(struct iommu_map_state *);

static int iommu_iomap_load_map(struct astro_softc *, bus_dmamap_t, int);
const struct hppa_bus_dma_tag astro_dmat = {
	NULL,
	iommu_dvmamap_create, iommu_dvmamap_destroy,
	iommu_dvmamap_load, iommu_dvmamap_load_mbuf,
	iommu_dvmamap_load_uio, iommu_dvmamap_load_raw,
	iommu_dvmamap_unload, iommu_dvmamap_sync,

	iommu_dvmamem_alloc, iommu_dvmamem_free, iommu_dvmamem_map,
	iommu_dvmamem_unmap, iommu_dvmamem_mmap
};
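
/*
 * astro_attach() copies this tag into sc_dmatag, sets the softc as its
 * _cookie and hands it to child devices through ca_dmatag, so their
 * bus_dma(9) operations are funnelled through the iommu_* routines below.
 */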
int
astro_match(device_t parent, cfdata_t cf, void *aux)
{
	struct confargs *ca = aux;

	/* Astro is a U-Turn variant. */
	if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
	    ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
		return 0;

	if (ca->ca_type.iodc_model == 0x58 &&
	    ca->ca_type.iodc_revision >= 0x20)
		return 0;

	return 1;
}
void
astro_attach(device_t parent, device_t self, void *aux)
{
	struct confargs *ca = aux, nca;
	struct astro_softc *sc = device_private(self);
	volatile struct astro_regs *r;
	bus_space_handle_t ioh;
	uint32_t rid, ioc_ctrl;
	int iova_bits;
	size_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *m;
	struct pglist mlist;
	int pagezero_cookie;

	sc->sc_dv = self;
	sc->sc_dmat = ca->ca_dmatag;
	if (bus_space_map(ca->ca_iot, ca->ca_hpa, sizeof(struct astro_regs),
	    0, &ioh)) {
		aprint_error(": can't map IO space\n");
		return;
	}

	sc->sc_regs = r = (struct astro_regs *)ca->ca_hpa;

	rid = le32toh(r->rid);
	aprint_normal(": Astro rev %d.%d\n", (rid & 7) + 1, (rid >> 3) & 3);

	ioc_ctrl = le32toh(r->ioc_ctrl);
	ioc_ctrl &= ~ASTRO_IOC_CTRL_CE;
	ioc_ctrl &= ~ASTRO_IOC_CTRL_RM;
	ioc_ctrl &= ~ASTRO_IOC_CTRL_NC;
	r->ioc_ctrl = htole32(ioc_ctrl);

	/* XXX This gives us 256MB of iova space. */
	iova_bits = 28;

	r->tlb_ibase = htole32(0);
	r->tlb_imask = htole32(0xffffffff << iova_bits);

	/* Page size is 4K. */
	r->tlb_tcnfg = htole32(0);

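	/*
	 * Judging from iommu_remove(), which purges a single page by
	 * writing dva | PAGE_SHIFT, tlb_pcom takes an address or'ed with
	 * log2 of the purge size; writing 31 here presumably flushes the
	 * entire I/O TLB before iova space is enabled.
	 */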
	r->tlb_pcom = htole32(31);

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */
	size = (1 << (iova_bits - PAGE_SHIFT)) * sizeof(uint64_t);
	if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &mlist, 1, 0) != 0)
		panic("astroattach: no memory");

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		panic("astroattach: no memory");
	sc->sc_pdir = (uint64_t *)va;

	m = TAILQ_FIRST(&mlist);
	r->tlb_pdir_base = htole64(VM_PAGE_TO_PHYS(m));

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, pa,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	memset(sc->sc_pdir, 0, size);
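
	/*
	 * The page directory starts out zeroed, i.e. every IOTTE has
	 * IOTTE_V clear; iommu_enter() and iommu_remove() manage the
	 * valid bits from here on.
	 */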
	/*
	 * The PDC might have set up some devices to do DMA.  It will do
	 * this for the onboard USB controller if a USB keyboard is used
	 * for console input.  In that case, bad things will happen if we
	 * enable iova space.  So reset the PDC devices before we do that.
	 * Don't do this if we're using a serial console though, since it
	 * will stop working if we do.  This is fine since the serial port
	 * doesn't do DMA.
	 */
	pagezero_cookie = hp700_pagezero_map();
	if (PAGE0->mem_cons.pz_class != PCL_DUPLEX)
		pdc_call((iodcio_t)pdc, 0, PDC_IO, PDC_IO_RESET_DEVICES);
	hp700_pagezero_unmap(pagezero_cookie);

	/* Enable iova space. */
	r->tlb_ibase = htole32(1);

	/*
	 * Now that all the hardware is working, we need to allocate a
	 * dvma map.
	 */
	snprintf(sc->sc_dvmamapname, sizeof(sc->sc_dvmamapname),
	    "%s_dvma", device_xname(sc->sc_dv));
	sc->sc_dvmamap = extent_create(sc->sc_dvmamapname, 0, (1 << iova_bits),
	    M_DEVBUF, 0, 0, EX_NOWAIT);

	sc->sc_dmatag = astro_dmat;
	sc->sc_dmatag._cookie = sc;

	nca = *ca;	/* clone from us */
//	nca.ca_hpamask = HPPA_IOBEGIN;
	nca.ca_dmatag = &sc->sc_dmatag;
	nca.ca_hpabase = 0;	/* HPPA_UNDEF */
	nca.ca_nmodules = MAXMODBUS;
	pdc_scanbus(self, &nca, astro_callback);
}
static void
astro_callback(device_t self, struct confargs *ca)
{
	config_found_sm_loc(self, "gedoens", NULL, ca, mbprint, mbsubmatch);
}
int
iommu_dvmamap_create(void *v, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
{
	struct astro_softc *sc = v;
	bus_dmamap_t map;
	struct iommu_map_state *ims;
	int error;

	error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
	    boundary, flags, &map);
	if (error)
		return (error);

	ims = iommu_iomap_create(atop(round_page(size)));
	if (ims == NULL) {
		bus_dmamap_destroy(sc->sc_dmat, map);
		return (ENOMEM);
	}

	ims->ims_sc = sc;
	map->_dm_cookie = ims;
	*dmamap = map;

	return (0);
}
void
iommu_dvmamap_destroy(void *v, bus_dmamap_t map)
{
	struct astro_softc *sc = v;

	/*
	 * The specification (man page) requires a loaded
	 * map to be unloaded before it is destroyed.
	 */
	if (map->dm_nsegs)
		iommu_dvmamap_unload(sc, map);

	if (map->_dm_cookie)
		iommu_iomap_destroy(map->_dm_cookie);
	map->_dm_cookie = NULL;

	bus_dmamap_destroy(sc->sc_dmat, map);
}
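
/*
 * Second stage of a dmamap load: the parent tag has already filled
 * dm_segs[] with physical addresses.  Walk every page of every segment,
 * record the (va, pa) pairs in the per-map page map, carve a DVMA range
 * out of sc_dvmamap, point an IOTTE at each page and finally rewrite the
 * segment addresses so the device sees DVMA addresses.
 */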
static int
iommu_iomap_load_map(struct astro_softc *sc, bus_dmamap_t map, int flags)
{
	struct iommu_map_state *ims = map->_dm_cookie;
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;
	int err, seg, i;
	paddr_t pa, paend;
	vaddr_t va;
	bus_size_t sgsize;
	bus_size_t align, boundary;
	u_long dvmaddr;
	bus_addr_t dva;

	boundary = map->_dm_boundary;
	align = PAGE_SIZE;

	iommu_iomap_clear_pages(ims);

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];

		paend = round_page(ds->ds_addr + ds->ds_len);
		for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
		     pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
			err = iommu_iomap_insert_page(ims, va, pa);
			if (err) {
				printf("iomap insert error: %d for "
				    "va 0x%lx pa 0x%lx\n", err, va, pa);
				bus_dmamap_unload(sc->sc_dmat, map);
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}
		}
	}

	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;

	err = extent_alloc(sc->sc_dvmamap, sgsize, align, boundary,
	    EX_NOWAIT | EX_BOUNDZERO, &dvmaddr);
	if (err)
		return (err);

	ims->ims_dvmastart = dvmaddr;
	ims->ims_dvmasize = sgsize;

	dva = dvmaddr;
	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e) {
		e->ipe_dva = dva;
		iommu_enter(sc, e->ipe_dva, e->ipe_pa, e->ipe_va, flags);
		dva += PAGE_SIZE;
	}

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
		ds->ds_addr = iommu_iomap_translate(ims, ds->ds_addr);
	}

	return (0);
}
int
iommu_dvmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
    struct proc *p, int flags)
{
	struct astro_softc *sc = v;
	int err;

	err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
	if (err)
		return (err);

	return iommu_iomap_load_map(sc, map, flags);
}
int
iommu_dvmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
{
	struct astro_softc *sc = v;
	int err;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
	if (err)
		return (err);

	return iommu_iomap_load_map(sc, map, flags);
}
int
iommu_dvmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
{
	struct astro_softc *sc = v;

	printf("load_uio\n");

	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
}
int
iommu_dvmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct astro_softc *sc = v;

	printf("load_raw\n");

	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags));
}
void
iommu_dvmamap_unload(void *v, bus_dmamap_t map)
{
	struct astro_softc *sc = v;
	struct iommu_map_state *ims = map->_dm_cookie;
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;
	int err, i;

	/* Remove the IOMMU entries. */
	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e)
		iommu_remove(sc, e->ipe_dva);

	/* Clear the iomap. */
	iommu_iomap_clear_pages(ims);

	bus_dmamap_unload(sc->sc_dmat, map);

	err = extent_free(sc->sc_dvmamap, ims->ims_dvmastart,
	    ims->ims_dvmasize, EX_NOWAIT);
	if (err)
		printf("warning: %ld of DVMA space lost\n", ims->ims_dvmasize);
	ims->ims_dvmastart = 0;
	ims->ims_dvmasize = 0;
}
void
iommu_dvmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
    bus_size_t len, int ops)
{
	/* Nothing to do; DMA is cache-coherent. */
}
int
iommu_dvmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	struct astro_softc *sc = v;

	return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
	    segs, nsegs, rsegs, flags));
}
void
iommu_dvmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
{
	struct astro_softc *sc = v;

	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
}
int
iommu_dvmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
    void **kvap, int flags)
{
	struct astro_softc *sc = v;

	return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
}
void
iommu_dvmamem_unmap(void *v, void *kva, size_t size)
{
	struct astro_softc *sc = v;

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
}
paddr_t
iommu_dvmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	struct astro_softc *sc = v;

	return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
}
/*
 * Utility function used by splay tree to order page entries by pa.
 */
int
iomap_compare(struct iommu_page_entry *a, struct iommu_page_entry *b)
{
	return ((a->ipe_pa > b->ipe_pa) ? 1 :
	    (a->ipe_pa < b->ipe_pa) ? -1 : 0);
}

SPLAY_PROTOTYPE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);

SPLAY_GENERATE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
/*
 * Create a new iomap.
 */
struct iommu_map_state *
iommu_iomap_create(int n)
{
	struct iommu_map_state *ims;

	/* Safety for heavily fragmented data, such as mbufs */
	n += 4;
	if (n < 16)
		n = 16;

	ims = malloc(sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ims == NULL)
		return (NULL);

	/* Initialize the map. */
	ims->ims_map.ipm_maxpage = n;
	SPLAY_INIT(&ims->ims_map.ipm_tree);

	return (ims);
}
void
iommu_iomap_destroy(struct iommu_map_state *ims)
{
	if (ims->ims_map.ipm_pagecnt > 0)
		printf("iommu_iomap_destroy: %d page entries in use\n",
		    ims->ims_map.ipm_pagecnt);

	free(ims, M_DEVBUF);
}
/*
 * Insert a pa entry in the iomap.
 */
int
iommu_iomap_insert_page(struct iommu_map_state *ims, vaddr_t va, paddr_t pa)
{
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;

	if (ipm->ipm_pagecnt >= ipm->ipm_maxpage) {
		struct iommu_page_entry ipe;

		ipe.ipe_pa = pa;
		if (SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &ipe))
			return (0);

		return (ENOMEM);
	}

	e = &ipm->ipm_map[ipm->ipm_pagecnt];

	e->ipe_pa = pa;
	e->ipe_va = va;
	e->ipe_dva = 0;

	e = SPLAY_INSERT(iommu_page_tree, &ipm->ipm_tree, e);

	/* Duplicates are okay, but only count them once. */
	if (e)
		return (0);

	++ipm->ipm_pagecnt;

	return (0);
}
/*
 * Translate a physical address (pa) into a DVMA address.
 */
bus_addr_t
iommu_iomap_translate(struct iommu_map_state *ims, paddr_t pa)
{
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;
	struct iommu_page_entry pe;
	paddr_t offset = pa & PAGE_MASK;

	pe.ipe_pa = trunc_page(pa);

	e = SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &pe);

	if (e == NULL) {
		panic("couldn't find pa %lx\n", pa);
		return (0);
	}

	return (e->ipe_dva | offset);
}
/*
 * Clear the iomap table and tree.
 */
void
iommu_iomap_clear_pages(struct iommu_map_state *ims)
{
	ims->ims_map.ipm_pagecnt = 0;
	SPLAY_INIT(&ims->ims_map.ipm_tree);
}
/*
 * Add an entry to the IOMMU table.
 */
void
iommu_enter(struct astro_softc *sc, bus_addr_t dva, paddr_t pa, vaddr_t va,
    int flags)
{
	volatile uint64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	uint64_t tte;
	uint32_t ci;

	printf("iommu_enter dva %lx, pa %lx, va %lx\n", dva, pa, va);

	tte = le64toh(*tte_ptr);

	if (tte & IOTTE_V) {
		printf("Overwriting valid tte entry (dva %lx pa %lx "
		    "&tte %p tte %llx)\n", dva, pa, tte_ptr, tte);
		extent_print(sc->sc_dvmamap);
		panic("IOMMU overwrite");
	}

	ci = lci(HPPA_SID_KERNEL, va);

	tte = (pa & IOTTE_PAMASK) | ((ci >> 12) & IOTTE_CI);
	tte |= IOTTE_V;
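
	/*
	 * The page directory lives in ordinary cacheable kernel memory,
	 * so flush the updated entry out of the data cache where the
	 * IOC can see it.
	 */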
	*tte_ptr = htole64(tte);
	fdcache(HPPA_SID_KERNEL, (vaddr_t)tte_ptr, sizeof(*tte_ptr));
}
/*
 * Remove an entry from the IOMMU table.
 */
void
iommu_remove(struct astro_softc *sc, bus_addr_t dva)
{
	volatile struct astro_regs *r = sc->sc_regs;
	uint64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	uint64_t tte;

	if (dva != trunc_page(dva)) {
		printf("iommu_remove: unaligned dva: %lx\n", dva);
		dva = trunc_page(dva);
	}

	tte = le64toh(*tte_ptr);

	if ((tte & IOTTE_V) == 0) {
		printf("Removing invalid tte entry (dva %lx &tte %p "
		    "tte %llx)\n", dva, tte_ptr, tte);
		extent_print(sc->sc_dvmamap);
		panic("IOMMU remove overwrite");
	}

	*tte_ptr = htole64(tte & ~IOTTE_V);
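
	/* Purge the I/O TLB entry for this page (dva | log2 of the size). */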
	r->tlb_pcom = htole32(dva | PAGE_SHIFT);
}