4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
27 * Copyright (c) 2009, Intel Corporation.
28 * All rights reserved.
33 #include <vm/hat_i86.h>
34 #include <sys/sysmacros.h>
35 #include <sys/smp_impldefs.h>
39 typedef struct intrmap_private
{
41 immu_inv_wait_t ir_inv_wait
;
43 uint32_t ir_sid_svt_sq
;
46 #define INTRMAP_PRIVATE(intrmap) ((intrmap_private_t *)intrmap)
48 /* interrupt remapping table entry */
49 typedef struct intrmap_rte
{
54 #define IRTE_HIGH(sid_svt_sq) (sid_svt_sq)
55 #define IRTE_LOW(dst, vector, dlm, tm, rh, dm, fpd, p) \
56 (((uint64_t)(dst) << 32) | \
57 ((uint64_t)(vector) << 16) | \
58 ((uint64_t)(dlm) << 5) | \
59 ((uint64_t)(tm) << 4) | \
60 ((uint64_t)(rh) << 3) | \
61 ((uint64_t)(dm) << 2) | \
62 ((uint64_t)(fpd) << 1) | \
/* SVT: source validation type for IRTE source-id verification */
typedef enum {
	SVT_NO_VERIFY = 0,	/* no verification */
	SVT_ALL_VERIFY,		/* using sid and sq to verify */
	SVT_BUS_VERIFY,		/* verify #startbus and #endbus */
	SVT_RSVD		/* reserved */
} intrmap_svt_t;
/* SQ: how many low-order bits of the source-id to ignore when verifying */
typedef enum {
	SQ_VERIFY_ALL = 0,	/* verify all 16 bits */
	SQ_VERIFY_IGR_1,	/* ignore bit 3 */
	SQ_VERIFY_IGR_2,	/* ignore bit 2-3 */
	SQ_VERIFY_IGR_3		/* ignore bit 1-3 */
} intrmap_sq_t;
80 * S field of the Interrupt Remapping Table Address Register
81 * the size of the interrupt remapping table is 1 << (immu_intrmap_irta_s + 1)
83 static uint_t intrmap_irta_s
= INTRMAP_MAX_IRTA_SIZE
;
86 * If true, arrange to suppress broadcast EOI by setting edge-triggered mode
87 * even for level-triggered interrupts in the interrupt-remapping engine.
88 * If false, broadcast EOI can still be suppressed if the CPU supports the
89 * APIC_SVR_SUPPRESS_BROADCAST_EOI bit. In both cases, the IOAPIC is still
90 * programmed with the correct trigger mode, and pcplusmp must send an EOI
91 * to the IOAPIC by writing to the IOAPIC's EOI register to make up for the
92 * missing broadcast EOI.
94 static int intrmap_suppress_brdcst_eoi
= 0;
97 * whether verify the source id of interrupt request
99 static int intrmap_enable_sid_verify
= 0;
/* fault types for DVMA remapping, indexed by hardware fault reason */
static char *immu_dvma_faults[] = {
	"Reserved",
	"The present field in root-entry is Clear",
	"The present field in context-entry is Clear",
	"Hardware detected invalid programming of a context-entry",
	"The DMA request attempted to access an address beyond max support",
	"The Write field in a page-table entry is Clear when DMA write",
	"The Read field in a page-table entry is Clear when DMA read",
	"Access the next level page table resulted in error",
	"Access the root-entry table resulted in error",
	"Access the context-entry table resulted in error",
	"Reserved field not initialized to zero in a present root-entry",
	"Reserved field not initialized to zero in a present context-entry",
	"Reserved field not initialized to zero in a present page-table entry",
	"DMA blocked due to the Translation Type field in context-entry",
	"Incorrect fault event reason number",
};
/* index of the last (catch-all) entry above */
#define	DVMA_MAX_FAULTS	(sizeof (immu_dvma_faults)/(sizeof (char *))) - 1
/* fault types for interrupt remapping, indexed by (fault reason - 0x20) */
static char *immu_intrmap_faults[] = {
	"reserved field set in IRTE",
	"interrupt_index exceed the intr-remap table size",
	"present field in IRTE is clear",
	"hardware access intr-remap table address resulted in error",
	"reserved field set in IRTE, include various conditional",
	"hardware blocked an interrupt request in Compatibility format",
	"remappable interrupt request blocked due to verification failure"
};
/* index of the last entry above */
#define	INTRMAP_MAX_FAULTS \
	(sizeof (immu_intrmap_faults) / (sizeof (char *))) - 1
134 /* Function prototypes */
135 static int immu_intrmap_init(int apic_mode
);
136 static void immu_intrmap_switchon(int suppress_brdcst_eoi
);
137 static void immu_intrmap_alloc(void **intrmap_private_tbl
, dev_info_t
*dip
,
138 uint16_t type
, int count
, uchar_t ioapic_index
);
139 static void immu_intrmap_map(void *intrmap_private
, void *intrmap_data
,
140 uint16_t type
, int count
);
141 static void immu_intrmap_free(void **intrmap_privatep
);
142 static void immu_intrmap_rdt(void *intrmap_private
, ioapic_rdt_t
*irdt
);
143 static void immu_intrmap_msi(void *intrmap_private
, msi_regs_t
*mregs
);
145 static struct apic_intrmap_ops intrmap_ops
= {
147 immu_intrmap_switchon
,
155 /* apic mode, APIC/X2APIC */
156 static int intrmap_apic_mode
= LOCAL_APIC
;
163 bitset_find_free(bitset_t
*b
, uint_t post
)
166 uint_t cap
= bitset_capacity(b
);
173 for (i
= post
; i
< cap
; i
++) {
174 if (!bitset_in_set(b
, i
))
178 for (i
= 0; i
< post
; i
++) {
179 if (!bitset_in_set(b
, i
))
183 return (INTRMAP_IDX_FULL
); /* no free index */
187 * helper function to find 'count' contigous free
188 * interrupt remapping table entries
191 bitset_find_multi_free(bitset_t
*b
, uint_t post
, uint_t count
)
194 uint_t cap
= bitset_capacity(b
);
196 if (post
== INTRMAP_IDX_FULL
) {
197 return (INTRMAP_IDX_FULL
);
201 return (INTRMAP_IDX_FULL
);
205 for (i
= post
; (i
+ count
) <= cap
; i
++) {
206 for (j
= 0; j
< count
; j
++) {
207 if (bitset_in_set(b
, (i
+ j
))) {
216 for (i
= 0; (i
< post
) && ((i
+ count
) <= cap
); i
++) {
217 for (j
= 0; j
< count
; j
++) {
218 if (bitset_in_set(b
, (i
+ j
))) {
227 return (INTRMAP_IDX_FULL
); /* no free index */
230 /* alloc one interrupt remapping table entry */
232 alloc_tbl_entry(intrmap_t
*intrmap
)
237 mutex_enter(&intrmap
->intrmap_lock
);
238 idx
= intrmap
->intrmap_free
;
239 if (idx
!= INTRMAP_IDX_FULL
) {
240 bitset_add(&intrmap
->intrmap_map
, idx
);
241 intrmap
->intrmap_free
=
242 bitset_find_free(&intrmap
->intrmap_map
, idx
+ 1);
243 mutex_exit(&intrmap
->intrmap_lock
);
247 /* no free intr entry, use compatible format intr */
248 mutex_exit(&intrmap
->intrmap_lock
);
250 if (intrmap_apic_mode
!= LOCAL_X2APIC
) {
255 * x2apic mode not allowed compatible
258 delay(IMMU_ALLOC_RESOURCE_DELAY
);
264 /* alloc 'cnt' contigous interrupt remapping table entries */
266 alloc_tbl_multi_entries(intrmap_t
*intrmap
, uint_t cnt
)
271 mutex_enter(&intrmap
->intrmap_lock
);
272 pos
= intrmap
->intrmap_free
;
273 idx
= bitset_find_multi_free(&intrmap
->intrmap_map
, pos
, cnt
);
275 if (idx
!= INTRMAP_IDX_FULL
) {
276 if (idx
<= pos
&& pos
< (idx
+ cnt
)) {
277 intrmap
->intrmap_free
= bitset_find_free(
278 &intrmap
->intrmap_map
, idx
+ cnt
);
280 for (i
= 0; i
< cnt
; i
++) {
281 bitset_add(&intrmap
->intrmap_map
, idx
+ i
);
283 mutex_exit(&intrmap
->intrmap_lock
);
287 mutex_exit(&intrmap
->intrmap_lock
);
289 if (intrmap_apic_mode
!= LOCAL_X2APIC
) {
293 /* x2apic mode not allowed comapitible interrupt */
294 delay(IMMU_ALLOC_RESOURCE_DELAY
);
300 /* init interrupt remapping table */
302 init_unit(immu_t
*immu
)
307 ddi_dma_attr_t intrmap_dma_attr
= {
310 0xffffffffffffffffULL
,
312 MMU_PAGESIZE
, /* page aligned */
316 0xffffffffffffffffULL
,
322 ddi_device_acc_attr_t intrmap_acc_attr
= {
329 * Using interrupt remapping implies using the queue
330 * invalidation interface. According to Intel,
331 * hardware that supports interrupt remapping should
334 ASSERT(IMMU_ECAP_GET_QI(immu
->immu_regs_excap
));
336 if (intrmap_apic_mode
== LOCAL_X2APIC
) {
337 if (!IMMU_ECAP_GET_EIM(immu
->immu_regs_excap
)) {
338 return (DDI_FAILURE
);
342 if (intrmap_irta_s
> INTRMAP_MAX_IRTA_SIZE
) {
343 intrmap_irta_s
= INTRMAP_MAX_IRTA_SIZE
;
346 intrmap
= kmem_zalloc(sizeof (intrmap_t
), KM_SLEEP
);
348 if (ddi_dma_alloc_handle(immu
->immu_dip
,
352 &(intrmap
->intrmap_dma_hdl
)) != DDI_SUCCESS
) {
353 kmem_free(intrmap
, sizeof (intrmap_t
));
354 return (DDI_FAILURE
);
357 intrmap
->intrmap_size
= 1 << (intrmap_irta_s
+ 1);
358 size
= intrmap
->intrmap_size
* INTRMAP_RTE_SIZE
;
359 if (ddi_dma_mem_alloc(intrmap
->intrmap_dma_hdl
,
362 DDI_DMA_CONSISTENT
| IOMEM_DATA_UNCACHED
,
365 &(intrmap
->intrmap_vaddr
),
367 &(intrmap
->intrmap_acc_hdl
)) != DDI_SUCCESS
) {
368 ddi_dma_free_handle(&(intrmap
->intrmap_dma_hdl
));
369 kmem_free(intrmap
, sizeof (intrmap_t
));
370 return (DDI_FAILURE
);
373 ASSERT(!((uintptr_t)intrmap
->intrmap_vaddr
& MMU_PAGEOFFSET
));
374 bzero(intrmap
->intrmap_vaddr
, size
);
375 intrmap
->intrmap_paddr
= pfn_to_pa(
376 hat_getpfnum(kas
.a_hat
, intrmap
->intrmap_vaddr
));
378 mutex_init(&(intrmap
->intrmap_lock
), NULL
, MUTEX_DRIVER
, NULL
);
379 bitset_init(&intrmap
->intrmap_map
);
380 bitset_resize(&intrmap
->intrmap_map
, intrmap
->intrmap_size
);
381 intrmap
->intrmap_free
= 0;
383 immu
->immu_intrmap
= intrmap
;
385 return (DDI_SUCCESS
);
389 get_immu(dev_info_t
*dip
, uint16_t type
, uchar_t ioapic_index
)
393 if (!DDI_INTR_IS_MSI_OR_MSIX(type
)) {
394 immu
= immu_dmar_ioapic_immu(ioapic_index
);
397 immu
= immu_dmar_get_immu(dip
);
404 get_top_pcibridge(dev_info_t
*dip
, void *arg
)
406 dev_info_t
**topdipp
= arg
;
407 immu_devi_t
*immu_devi
;
409 mutex_enter(&(DEVI(dip
)->devi_lock
));
410 immu_devi
= DEVI(dip
)->devi_iommu
;
411 mutex_exit(&(DEVI(dip
)->devi_lock
));
413 if (immu_devi
== NULL
|| immu_devi
->imd_pcib_type
== IMMU_PCIB_BAD
||
414 immu_devi
->imd_pcib_type
== IMMU_PCIB_ENDPOINT
) {
415 return (DDI_WALK_CONTINUE
);
420 return (DDI_WALK_CONTINUE
);
424 intrmap_top_pcibridge(dev_info_t
*rdip
)
426 dev_info_t
*top_pcibridge
= NULL
;
428 if (immu_walk_ancestor(rdip
, NULL
, get_top_pcibridge
,
429 &top_pcibridge
, NULL
, 0) != DDI_SUCCESS
) {
433 return (top_pcibridge
);
436 /* function to get interrupt request source id */
438 get_sid(dev_info_t
*dip
, uint16_t type
, uchar_t ioapic_index
)
441 immu_devi_t
*immu_devi
;
445 if (!intrmap_enable_sid_verify
) {
449 if (!DDI_INTR_IS_MSI_OR_MSIX(type
)) {
450 /* for interrupt through I/O APIC */
451 sid
= immu_dmar_ioapic_sid(ioapic_index
);
452 svt
= SVT_ALL_VERIFY
;
455 /* MSI/MSI-X interrupt */
457 pdip
= intrmap_top_pcibridge(dip
);
459 immu_devi
= DEVI(pdip
)->devi_iommu
;
461 if (immu_devi
->imd_pcib_type
== IMMU_PCIB_PCIE_PCI
) {
462 /* device behind pcie to pci bridge */
463 sid
= (immu_devi
->imd_bus
<< 8) | immu_devi
->imd_sec
;
464 svt
= SVT_BUS_VERIFY
;
467 /* pcie device or device behind pci to pci bridge */
468 sid
= (immu_devi
->imd_bus
<< 8) |
469 immu_devi
->imd_devfunc
;
470 svt
= SVT_ALL_VERIFY
;
475 return (sid
| (svt
<< 18) | (sq
<< 16));
479 intrmap_enable(immu_t
*immu
)
484 intrmap
= immu
->immu_intrmap
;
486 irta_reg
= intrmap
->intrmap_paddr
| intrmap_irta_s
;
487 if (intrmap_apic_mode
== LOCAL_X2APIC
) {
488 irta_reg
|= (0x1 << 11);
491 immu_regs_intrmap_enable(immu
, irta_reg
);
494 /* ####################################################################### */
497 * immu_intr_handler()
498 * the fault event handler for a single immu unit
501 immu_intr_handler(immu_t
*immu
)
504 int index
, fault_reg_offset
;
506 boolean_t found_fault
;
509 mutex_enter(&(immu
->immu_intr_lock
));
510 mutex_enter(&(immu
->immu_regs_lock
));
512 /* read the fault status */
513 status
= immu_regs_get32(immu
, IMMU_REG_FAULT_STS
);
515 idip
= immu
->immu_dip
;
518 /* check if we have a pending fault for this immu unit */
519 if ((status
& IMMU_FAULT_STS_PPF
) == 0) {
520 mutex_exit(&(immu
->immu_regs_lock
));
521 mutex_exit(&(immu
->immu_intr_lock
));
522 return (DDI_INTR_UNCLAIMED
);
526 * handle all primary pending faults
528 index
= IMMU_FAULT_GET_INDEX(status
);
529 max_fault_index
= IMMU_CAP_GET_NFR(immu
->immu_regs_cap
) - 1;
530 fault_reg_offset
= IMMU_CAP_GET_FRO(immu
->immu_regs_cap
);
532 found_fault
= B_FALSE
;
536 uint8_t fault_reason
;
542 /* read the higher 64bits */
543 val
= immu_regs_get64(immu
, fault_reg_offset
+ index
* 16 + 8);
545 /* check if this fault register has pending fault */
546 if (!IMMU_FRR_GET_F(val
)) {
550 found_fault
= B_TRUE
;
552 /* get the fault reason, fault type and sid */
553 fault_reason
= IMMU_FRR_GET_FR(val
);
554 fault_type
= IMMU_FRR_GET_FT(val
);
555 sid
= IMMU_FRR_GET_SID(val
);
557 /* read the first 64bits */
558 val
= immu_regs_get64(immu
, fault_reg_offset
+ index
* 16);
559 pg_addr
= val
& IMMU_PAGEMASK
;
562 /* clear the fault */
563 immu_regs_put32(immu
, fault_reg_offset
+ index
* 16 + 12,
564 (((uint32_t)1) << 31));
566 /* report the fault info */
567 if (fault_reason
< 0x20) {
568 /* immu-remapping fault */
569 ddi_err(DER_WARN
, idip
,
570 "generated a fault event when translating DMA %s\n"
571 "\t on address 0x%" PRIx64
" for PCI(%d, %d, %d), "
572 "the reason is:\n\t %s",
573 fault_type
? "read" : "write", pg_addr
,
574 (sid
>> 8) & 0xff, (sid
>> 3) & 0x1f, sid
& 0x7,
575 immu_dvma_faults
[MIN(fault_reason
,
577 immu_print_fault_info(sid
, pg_addr
);
578 } else if (fault_reason
< 0x27) {
579 /* intr-remapping fault */
580 ddi_err(DER_WARN
, idip
,
581 "generated a fault event when translating "
582 "interrupt request\n"
583 "\t on index 0x%" PRIx64
" for PCI(%d, %d, %d), "
584 "the reason is:\n\t %s",
586 (sid
>> 8) & 0xff, (sid
>> 3) & 0x1f, sid
& 0x7,
587 immu_intrmap_faults
[MIN((fault_reason
- 0x20),
588 INTRMAP_MAX_FAULTS
)]);
590 ddi_err(DER_WARN
, idip
, "Unknown fault reason: 0x%x",
595 if (index
> max_fault_index
)
599 /* Clear the fault */
601 ddi_err(DER_MODE
, idip
,
602 "Fault register set but no fault present");
604 immu_regs_put32(immu
, IMMU_REG_FAULT_STS
, 1);
605 mutex_exit(&(immu
->immu_regs_lock
));
606 mutex_exit(&(immu
->immu_intr_lock
));
607 return (DDI_INTR_CLAIMED
);
609 /* ######################################################################### */
612 * Interrupt remap entry points
615 /* initialize interrupt remapping */
617 immu_intrmap_init(int apic_mode
)
620 int error
= DDI_FAILURE
;
622 if (immu_intrmap_enable
== B_FALSE
) {
623 return (DDI_SUCCESS
);
626 intrmap_apic_mode
= apic_mode
;
628 immu
= list_head(&immu_list
);
629 for (; immu
; immu
= list_next(&immu_list
, immu
)) {
630 if ((immu
->immu_intrmap_running
== B_TRUE
) &&
631 IMMU_ECAP_GET_IR(immu
->immu_regs_excap
)) {
632 if (init_unit(immu
) == DDI_SUCCESS
) {
639 * if all IOMMU units disable intr remapping,
647 /* enable interrupt remapping */
649 immu_intrmap_switchon(int suppress_brdcst_eoi
)
654 intrmap_suppress_brdcst_eoi
= suppress_brdcst_eoi
;
656 immu
= list_head(&immu_list
);
657 for (; immu
; immu
= list_next(&immu_list
, immu
)) {
658 if (immu
->immu_intrmap_setup
== B_TRUE
) {
659 intrmap_enable(immu
);
664 /* alloc remapping entry for the interrupt */
666 immu_intrmap_alloc(void **intrmap_private_tbl
, dev_info_t
*dip
,
667 uint16_t type
, int count
, uchar_t ioapic_index
)
671 immu_inv_wait_t
*iwp
;
674 intrmap_private_t
*intrmap_private
;
676 if (intrmap_private_tbl
[0] == INTRMAP_DISABLE
||
677 intrmap_private_tbl
[0] != NULL
) {
681 intrmap_private_tbl
[0] =
682 kmem_zalloc(sizeof (intrmap_private_t
), KM_SLEEP
);
683 intrmap_private
= INTRMAP_PRIVATE(intrmap_private_tbl
[0]);
685 immu
= get_immu(dip
, type
, ioapic_index
);
686 if ((immu
!= NULL
) && (immu
->immu_intrmap_running
== B_TRUE
)) {
687 intrmap_private
->ir_immu
= immu
;
689 goto intrmap_disable
;
692 intrmap
= immu
->immu_intrmap
;
695 idx
= alloc_tbl_entry(intrmap
);
697 idx
= alloc_tbl_multi_entries(intrmap
, count
);
700 if (idx
== INTRMAP_IDX_FULL
) {
701 goto intrmap_disable
;
704 intrmap_private
->ir_idx
= idx
;
706 sid_svt_sq
= intrmap_private
->ir_sid_svt_sq
=
707 get_sid(dip
, type
, ioapic_index
);
708 iwp
= &intrmap_private
->ir_inv_wait
;
709 immu_init_inv_wait(iwp
, "intrmaplocal", B_TRUE
);
712 if (IMMU_CAP_GET_CM(immu
->immu_regs_cap
)) {
713 immu_qinv_intr_one_cache(immu
, idx
, iwp
);
715 immu_regs_wbf_flush(immu
);
720 for (i
= 1; i
< count
; i
++) {
721 intrmap_private_tbl
[i
] =
722 kmem_zalloc(sizeof (intrmap_private_t
), KM_SLEEP
);
724 INTRMAP_PRIVATE(intrmap_private_tbl
[i
])->ir_immu
= immu
;
725 INTRMAP_PRIVATE(intrmap_private_tbl
[i
])->ir_sid_svt_sq
=
727 INTRMAP_PRIVATE(intrmap_private_tbl
[i
])->ir_idx
= idx
+ i
;
730 if (IMMU_CAP_GET_CM(immu
->immu_regs_cap
)) {
731 immu_qinv_intr_caches(immu
, idx
, count
, iwp
);
733 immu_regs_wbf_flush(immu
);
739 kmem_free(intrmap_private_tbl
[0], sizeof (intrmap_private_t
));
740 intrmap_private_tbl
[0] = INTRMAP_DISABLE
;
744 /* remapping the interrupt */
746 immu_intrmap_map(void *intrmap_private
, void *intrmap_data
, uint16_t type
,
750 immu_inv_wait_t
*iwp
;
752 ioapic_rdt_t
*irdt
= (ioapic_rdt_t
*)intrmap_data
;
753 msi_regs_t
*mregs
= (msi_regs_t
*)intrmap_data
;
756 uint32_t dst
, sid_svt_sq
;
757 uchar_t vector
, dlm
, tm
, rh
, dm
;
759 if (intrmap_private
== INTRMAP_DISABLE
)
762 idx
= INTRMAP_PRIVATE(intrmap_private
)->ir_idx
;
763 immu
= INTRMAP_PRIVATE(intrmap_private
)->ir_immu
;
764 iwp
= &INTRMAP_PRIVATE(intrmap_private
)->ir_inv_wait
;
765 intrmap
= immu
->immu_intrmap
;
766 sid_svt_sq
= INTRMAP_PRIVATE(intrmap_private
)->ir_sid_svt_sq
;
768 if (!DDI_INTR_IS_MSI_OR_MSIX(type
)) {
769 dm
= RDT_DM(irdt
->ir_lo
);
771 tm
= RDT_TM(irdt
->ir_lo
);
772 dlm
= RDT_DLM(irdt
->ir_lo
);
776 * Mark the IRTE's TM as Edge to suppress broadcast EOI.
778 if (intrmap_suppress_brdcst_eoi
) {
779 tm
= TRIGGER_MODE_EDGE
;
782 vector
= RDT_VECTOR(irdt
->ir_lo
);
784 dm
= MSI_ADDR_DM_PHYSICAL
;
785 rh
= MSI_ADDR_RH_FIXED
;
786 tm
= TRIGGER_MODE_EDGE
;
788 dst
= mregs
->mr_addr
;
790 vector
= mregs
->mr_data
& 0xff;
793 if (intrmap_apic_mode
== LOCAL_APIC
)
794 dst
= (dst
& 0xFF) << 8;
797 irte
.lo
= IRTE_LOW(dst
, vector
, dlm
, tm
, rh
, dm
, 0, 1);
798 irte
.hi
= IRTE_HIGH(sid_svt_sq
);
800 /* set interrupt remapping table entry */
801 bcopy(&irte
, intrmap
->intrmap_vaddr
+
802 idx
* INTRMAP_RTE_SIZE
,
805 immu_qinv_intr_one_cache(immu
, idx
, iwp
);
808 for (i
= 0; i
< count
; i
++) {
809 irte
.lo
= IRTE_LOW(dst
, vector
, dlm
, tm
, rh
, dm
, 0, 1);
810 irte
.hi
= IRTE_HIGH(sid_svt_sq
);
812 /* set interrupt remapping table entry */
813 bcopy(&irte
, intrmap
->intrmap_vaddr
+
814 idx
* INTRMAP_RTE_SIZE
,
820 immu_qinv_intr_caches(immu
, idx
, count
, iwp
);
824 /* free the remapping entry */
826 immu_intrmap_free(void **intrmap_privatep
)
829 immu_inv_wait_t
*iwp
;
833 if (*intrmap_privatep
== INTRMAP_DISABLE
|| *intrmap_privatep
== NULL
) {
834 *intrmap_privatep
= NULL
;
838 immu
= INTRMAP_PRIVATE(*intrmap_privatep
)->ir_immu
;
839 iwp
= &INTRMAP_PRIVATE(*intrmap_privatep
)->ir_inv_wait
;
840 intrmap
= immu
->immu_intrmap
;
841 idx
= INTRMAP_PRIVATE(*intrmap_privatep
)->ir_idx
;
843 bzero(intrmap
->intrmap_vaddr
+ idx
* INTRMAP_RTE_SIZE
,
846 immu_qinv_intr_one_cache(immu
, idx
, iwp
);
848 mutex_enter(&intrmap
->intrmap_lock
);
849 bitset_del(&intrmap
->intrmap_map
, idx
);
850 if (intrmap
->intrmap_free
== INTRMAP_IDX_FULL
) {
851 intrmap
->intrmap_free
= idx
;
853 mutex_exit(&intrmap
->intrmap_lock
);
855 kmem_free(*intrmap_privatep
, sizeof (intrmap_private_t
));
856 *intrmap_privatep
= NULL
;
859 /* record the ioapic rdt entry */
861 immu_intrmap_rdt(void *intrmap_private
, ioapic_rdt_t
*irdt
)
863 uint32_t rdt_entry
, tm
, pol
, idx
, vector
;
865 rdt_entry
= irdt
->ir_lo
;
867 if (intrmap_private
!= INTRMAP_DISABLE
&& intrmap_private
!= NULL
) {
868 idx
= INTRMAP_PRIVATE(intrmap_private
)->ir_idx
;
869 tm
= RDT_TM(rdt_entry
);
870 pol
= RDT_POL(rdt_entry
);
871 vector
= RDT_VECTOR(rdt_entry
);
872 irdt
->ir_lo
= (tm
<< INTRMAP_IOAPIC_TM_SHIFT
) |
873 (pol
<< INTRMAP_IOAPIC_POL_SHIFT
) |
874 ((idx
>> 15) << INTRMAP_IOAPIC_IDX15_SHIFT
) |
876 irdt
->ir_hi
= (idx
<< INTRMAP_IOAPIC_IDX_SHIFT
) |
877 (1 << INTRMAP_IOAPIC_FORMAT_SHIFT
);
879 irdt
->ir_hi
<<= APIC_ID_BIT_OFFSET
;
883 /* record the msi interrupt structure */
886 immu_intrmap_msi(void *intrmap_private
, msi_regs_t
*mregs
)
890 if (intrmap_private
!= INTRMAP_DISABLE
&& intrmap_private
!= NULL
) {
891 idx
= INTRMAP_PRIVATE(intrmap_private
)->ir_idx
;
894 mregs
->mr_addr
= MSI_ADDR_HDR
|
895 ((idx
& 0x7fff) << INTRMAP_MSI_IDX_SHIFT
) |
896 (1 << INTRMAP_MSI_FORMAT_SHIFT
) |
897 (1 << INTRMAP_MSI_SHV_SHIFT
) |
898 ((idx
>> 15) << INTRMAP_MSI_IDX15_SHIFT
);
900 mregs
->mr_addr
= MSI_ADDR_HDR
|
901 (MSI_ADDR_RH_FIXED
<< MSI_ADDR_RH_SHIFT
) |
902 (MSI_ADDR_DM_PHYSICAL
<< MSI_ADDR_DM_SHIFT
) |
903 (mregs
->mr_addr
<< MSI_ADDR_DEST_SHIFT
);
904 mregs
->mr_data
= (MSI_DATA_TM_EDGE
<< MSI_DATA_TM_SHIFT
) |
909 /* ######################################################################### */
911 * Functions exported by immu_intr.c
914 immu_intrmap_setup(list_t
*listp
)
919 * Check if ACPI DMAR tables say that
920 * interrupt remapping is supported
922 if (immu_dmar_intrmap_supported() == B_FALSE
) {
927 * Check if interrupt remapping is disabled.
929 if (immu_intrmap_enable
== B_FALSE
) {
933 psm_vt_ops
= &intrmap_ops
;
935 immu
= list_head(listp
);
936 for (; immu
; immu
= list_next(listp
, immu
)) {
937 mutex_init(&(immu
->immu_intrmap_lock
), NULL
,
938 MUTEX_DEFAULT
, NULL
);
939 mutex_enter(&(immu
->immu_intrmap_lock
));
940 immu_init_inv_wait(&immu
->immu_intrmap_inv_wait
,
941 "intrmapglobal", B_TRUE
);
942 immu
->immu_intrmap_setup
= B_TRUE
;
943 mutex_exit(&(immu
->immu_intrmap_lock
));
948 immu_intrmap_startup(immu_t
*immu
)
951 mutex_enter(&(immu
->immu_intrmap_lock
));
952 if (immu
->immu_intrmap_setup
== B_TRUE
) {
953 immu
->immu_intrmap_running
= B_TRUE
;
955 mutex_exit(&(immu
->immu_intrmap_lock
));
959 * Register a Intel IOMMU unit (i.e. DMAR unit's)
963 immu_intr_register(immu_t
*immu
)
966 char intr_handler_name
[IMMU_MAXNAMELEN
];
970 uint32_t localapic_id
= 0;
972 if (psm_get_localapicid
)
973 localapic_id
= psm_get_localapicid(0);
975 msi_addr
= (MSI_ADDR_HDR
|
976 ((localapic_id
& 0xFF) << MSI_ADDR_DEST_SHIFT
) |
977 (MSI_ADDR_RH_FIXED
<< MSI_ADDR_RH_SHIFT
) |
978 (MSI_ADDR_DM_PHYSICAL
<< MSI_ADDR_DM_SHIFT
));
980 if (intrmap_apic_mode
== LOCAL_X2APIC
) {
981 uaddr
= localapic_id
& 0xFFFFFF00;
986 /* Dont need to hold immu_intr_lock since we are in boot */
987 irq
= vect
= psm_get_ipivect(IMMU_INTR_IPL
, -1);
988 if (psm_xlate_vector_by_irq
!= NULL
)
989 vect
= psm_xlate_vector_by_irq(irq
);
991 msi_data
= ((MSI_DATA_DELIVERY_FIXED
<<
992 MSI_DATA_DELIVERY_SHIFT
) | vect
);
994 (void) snprintf(intr_handler_name
, sizeof (intr_handler_name
),
995 "%s-intr-handler", immu
->immu_name
);
997 (void) add_avintr(NULL
, IMMU_INTR_IPL
,
998 (avfunc
)(immu_intr_handler
), intr_handler_name
, irq
,
999 (caddr_t
)immu
, NULL
, NULL
, NULL
);
1001 immu_regs_intr_enable(immu
, msi_addr
, msi_data
, uaddr
);
1003 (void) immu_intr_handler(immu
);