/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Portions Copyright (c) 2010, Oracle and/or its affiliates.
 * All rights reserved.
 */
/*
 * immu_regs.c - file that operates on an IMMU unit's registers
 */
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/sysmacros.h>
#include <sys/immu.h>
#define	get_reg32(immu, offset)	ddi_get32((immu)->immu_regs_handle, \
	(uint32_t *)(immu->immu_regs_addr + (offset)))
#define	get_reg64(immu, offset)	ddi_get64((immu)->immu_regs_handle, \
	(uint64_t *)(immu->immu_regs_addr + (offset)))
#define	put_reg32(immu, offset, val)	ddi_put32((immu)->immu_regs_handle, \
	(uint32_t *)(immu->immu_regs_addr + (offset)), val)
#define	put_reg64(immu, offset, val)	ddi_put64((immu)->immu_regs_handle, \
	(uint64_t *)(immu->immu_regs_addr + (offset)), val)
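/*
 * Illustrative usage (a sketch, not taken from the original file): the
 * macros above wrap the DDI register access routines with the handle and
 * base address obtained from ddi_regs_map_setup() in setup_regs() below,
 * for example:
 *
 *	uint32_t gsts = get_reg32(immu, IMMU_REG_GLOBAL_STS);
 *	put_reg32(immu, IMMU_REG_GLOBAL_CMD, immu->immu_regs_cmdval);
 *
 * IMMU_REG_GLOBAL_STS and IMMU_REG_GLOBAL_CMD are the VT-d register
 * offsets used elsewhere in this file.
 */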
static void immu_regs_inv_wait(immu_inv_wait_t *iwp);
struct immu_flushops immu_regs_flushops = {
	immu_regs_context_fsi,
	immu_regs_context_dsi,
	immu_regs_context_gbl,
	immu_regs_iotlb_psi,
	immu_regs_iotlb_dsi,
	immu_regs_iotlb_gbl,
	immu_regs_inv_wait
};
/*
 * wait max 60s for the hardware completion
 */
#define	IMMU_MAX_WAIT_TIME	60000000
#define	wait_completion(immu, offset, getf, completion, status) \
{ \
	clock_t stick = ddi_get_lbolt(); \
	clock_t ntick; \
	/*CONSTCOND*/ \
	while (1) { \
		status = getf(immu, offset); \
		ntick = ddi_get_lbolt(); \
		if (completion) { \
			break; \
		} \
		if (ntick - stick >= drv_usectohz(IMMU_MAX_WAIT_TIME)) { \
			ddi_err(DER_PANIC, NULL, \
			    "immu wait completion time out"); \
			/*NOTREACHED*/ \
		} \
	} \
}
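/*
 * Illustrative usage of wait_completion() (a sketch, not from the original
 * file): callers program a command register and then poll a status register
 * until the completion condition is observed or the 60s timeout panics.
 * This mirrors how immu_regs_startup() below waits for translation enable:
 *
 *	uint32_t status;
 *	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
 *	    immu->immu_regs_cmdval | IMMU_GCMD_TE);
 *	wait_completion(immu, IMMU_REG_GLOBAL_STS,
 *	    get_reg32, (status & IMMU_GSTS_TES), status);
 */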
static ddi_device_acc_attr_t immu_regs_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
};
/*
 * iotlb_flush()
 *   flush the iotlb cache
 */
static void
iotlb_flush(immu_t *immu, uint_t domain_id,
    uint64_t addr, uint_t am, uint_t hint, immu_iotlb_inv_t type)
{
	uint64_t command = 0, iva = 0;
	uint_t iva_offset, iotlb_offset;
	uint64_t status;

	/* no lock needed since cap and excap fields are RDONLY */
	iva_offset = IMMU_ECAP_GET_IRO(immu->immu_regs_excap);
	iotlb_offset = iva_offset + 8;

	/*
	 * prepare drain read/write command
	 */
	if (IMMU_CAP_GET_DWD(immu->immu_regs_cap)) {
		command |= TLB_INV_DRAIN_WRITE;
	}

	if (IMMU_CAP_GET_DRD(immu->immu_regs_cap)) {
		command |= TLB_INV_DRAIN_READ;
	}

	/*
	 * if the hardware doesn't support page selective invalidation, we
	 * will use domain type. Otherwise, use global type
	 */
	switch (type) {
	case IOTLB_PSI:
		command |= TLB_INV_PAGE | TLB_INV_IVT |
		    TLB_INV_DID(domain_id);
		iva = addr | am | TLB_IVA_HINT(hint);
		break;
	case IOTLB_DSI:
		command |= TLB_INV_DOMAIN | TLB_INV_IVT |
		    TLB_INV_DID(domain_id);
		break;
	case IOTLB_GLOBAL:
		command |= TLB_INV_GLOBAL | TLB_INV_IVT;
		break;
	default:
		ddi_err(DER_MODE, NULL, "%s: incorrect iotlb flush type",
		    immu->immu_name);
		return;
	}

	put_reg64(immu, iva_offset, iva);
	put_reg64(immu, iotlb_offset, command);
	wait_completion(immu, iotlb_offset, get_reg64,
	    (!(status & TLB_INV_IVT)), status);
}
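/*
 * Illustrative call (a sketch, not from the original file): a page-selective
 * invalidation of 2^4 = 16 pages starting at a suitably aligned dvma for
 * domain "did" would be issued as
 *
 *	iotlb_flush(immu, did, dvma, 4, hint, IOTLB_PSI);
 *
 * which is how immu_regs_iotlb_psi() below drives this routine; "am" is the
 * VT-d address-mask encoding, i.e. 2^am pages are invalidated.
 */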
/*
 * immu_regs_iotlb_psi()
 *   iotlb page specific invalidation
 */
void
immu_regs_iotlb_psi(immu_t *immu, uint_t did, uint64_t dvma, uint_t snpages,
    uint_t hint, immu_inv_wait_t *iwp)
{
	int dvma_am;
	int npg_am;
	int max_am;
	int am;
	uint64_t align;
	int npages_left;
	int npages;
	int i;

	if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
		immu_regs_iotlb_dsi(immu, did, iwp);
		return;
	}

	max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);

	mutex_enter(&(immu->immu_regs_lock));

	npages_left = snpages;
	for (i = 0; i < immu_flush_gran && npages_left > 0; i++) {
		/* First calculate alignment of DVMA */
		for (align = (1 << 12), dvma_am = 1;
		    (dvma & align) == 0; align <<= 1, dvma_am++)
			;

		/* Calculate the npg_am */
		npages = npages_left;
		for (npg_am = 0, npages >>= 1; npages; npages >>= 1, npg_am++)
			;

		am = MIN(max_am, MIN(dvma_am, npg_am));

		iotlb_flush(immu, did, dvma, am, hint, IOTLB_PSI);

		npages = 1 << am;
		npages_left -= npages;
		dvma += (npages * IMMU_PAGESIZE);
	}

	if (npages_left > 0)
		iotlb_flush(immu, did, 0, 0, 0, IOTLB_DSI);

	mutex_exit(&(immu->immu_regs_lock));
}
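/*
 * Worked example for the loop above (illustrative, not from the original
 * file, and assuming max_am from the MAMV field is at least 9): for
 * dvma = 0x200000 and snpages = 512, the alignment loop yields dvma_am = 10,
 * the page-count loop yields npg_am = 9, so am = MIN(10, 9) = 9 and a single
 * PSI flush of 2^9 = 512 pages covers the whole range in one iteration.
 */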
/*
 * immu_regs_iotlb_dsi()
 *   domain specific invalidation
 */
void
immu_regs_iotlb_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
	mutex_enter(&(immu->immu_regs_lock));
	iotlb_flush(immu, domain_id, 0, 0, 0, IOTLB_DSI);
	mutex_exit(&(immu->immu_regs_lock));
}
/*
 * immu_regs_iotlb_gbl()
 *   global iotlb invalidation
 */
void
immu_regs_iotlb_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
	mutex_enter(&(immu->immu_regs_lock));
	iotlb_flush(immu, 0, 0, 0, 0, IOTLB_GLOBAL);
	mutex_exit(&(immu->immu_regs_lock));
}
/*
 * set_agaw()
 *   calculate agaw for an IOMMU unit
 */
static int
set_agaw(immu_t *immu)
{
	int mgaw, magaw, agaw;
	int bitpos, nlevels;
	int max_sagaw_mask, sagaw_mask, mask;

	/*
	 * mgaw is the maximum guest address width.
	 * Addresses above this value will be
	 * blocked by the IOMMU unit.
	 * sagaw is a bitmask that lists all the
	 * AGAWs supported by this IOMMU unit.
	 */
	mgaw = IMMU_CAP_MGAW(immu->immu_regs_cap);
	sagaw_mask = IMMU_CAP_SAGAW(immu->immu_regs_cap);

	magaw = gaw2agaw(mgaw);

	/*
	 * Get bitpos corresponding to
	 * magaw
	 */

	/*
	 * Maximum SAGAW is specified by
	 * the Vt-d spec.
	 */
	max_sagaw_mask = ((1 << 5) - 1);

	if (sagaw_mask > max_sagaw_mask) {
		ddi_err(DER_WARN, NULL, "%s: SAGAW bitmask (%x) "
		    "is larger than maximum SAGAW bitmask "
		    "(%x) specified by Intel Vt-d spec",
		    immu->immu_name, sagaw_mask, max_sagaw_mask);
		return (DDI_FAILURE);
	}

	/*
	 * Find a supported AGAW <= magaw
	 *
	 *	sagaw_mask	bitpos	AGAW (bits)	nlevels
	 *	==============================================
	 *	0 0 0 0 1	0	30		2
	 *	0 0 0 1 0	1	39		3
	 *	0 0 1 0 0	2	48		4
	 *	0 1 0 0 0	3	57		5
	 *	1 0 0 0 0	4	64(66)		6
	 */
	agaw = 0;
	nlevels = 0;
	for (mask = 1, bitpos = 0; bitpos < 5;
	    bitpos++, mask <<= 1) {
		if (mask & sagaw_mask) {
			nlevels = bitpos + 2;
			agaw = 30 + (bitpos * 9);
		}
	}

	/* calculated agaw can be > 64 */
	agaw = (agaw > 64) ? 64 : agaw;

	if (agaw < 30 || agaw > magaw) {
		ddi_err(DER_WARN, NULL, "%s: Calculated AGAW (%d) "
		    "is outside valid limits [30,%d] specified by Vt-d spec "
		    "and magaw", immu->immu_name, agaw, magaw);
		return (DDI_FAILURE);
	}

	if (nlevels < 2 || nlevels > 6) {
		ddi_err(DER_WARN, NULL, "%s: Calculated pagetable "
		    "level (%d) is outside valid limits [2,6]",
		    immu->immu_name, nlevels);
		return (DDI_FAILURE);
	}

	ddi_err(DER_LOG, NULL, "Calculated pagetable "
	    "level (%d), agaw = %d", nlevels, agaw);

	immu->immu_dvma_nlevels = nlevels;
	immu->immu_dvma_agaw = agaw;

	return (DDI_SUCCESS);
}
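/*
 * Worked example (illustrative, not from the original file): a unit that
 * reports sagaw_mask = 0x4 (only bit 2 set) supports a 4-level page table;
 * the loop above picks bitpos = 2, so nlevels = 4 and agaw = 30 + 2*9 = 48
 * bits, which is accepted as long as the magaw derived from MGAW is >= 48.
 */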
/*
 * setup_regs()
 *   map the IMMU unit's register address space and
 *   cache its capability registers
 */
static int
setup_regs(immu_t *immu)
{
	int error;

	/*
	 * This lock may be acquired by the IOMMU interrupt handler
	 */
	mutex_init(&(immu->immu_regs_lock), NULL, MUTEX_DRIVER,
	    (void *)ipltospl(IMMU_INTR_IPL));

	/*
	 * map the register address space
	 */
	error = ddi_regs_map_setup(immu->immu_dip, 0,
	    (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
	    (offset_t)IMMU_REGSZ, &immu_regs_attr,
	    &(immu->immu_regs_handle));

	if (error == DDI_FAILURE) {
		ddi_err(DER_WARN, NULL, "%s: Intel IOMMU register map failed",
		    immu->immu_name);
		mutex_destroy(&(immu->immu_regs_lock));
		return (DDI_FAILURE);
	}

	/*
	 * get the register value
	 */
	immu->immu_regs_cap = get_reg64(immu, IMMU_REG_CAP);
	immu->immu_regs_excap = get_reg64(immu, IMMU_REG_EXCAP);

	/*
	 * if the hardware access is non-coherent, we need clflush
	 */
	if (IMMU_ECAP_GET_C(immu->immu_regs_excap)) {
		immu->immu_dvma_coherent = B_TRUE;
	} else {
		immu->immu_dvma_coherent = B_FALSE;
		if (!is_x86_feature(x86_featureset, X86FSET_CLFSH)) {
			ddi_err(DER_WARN, NULL,
			    "immu unit %s can't be enabled due to "
			    "missing clflush functionality", immu->immu_name);
			ddi_regs_map_free(&(immu->immu_regs_handle));
			mutex_destroy(&(immu->immu_regs_lock));
			return (DDI_FAILURE);
		}
	}

	/* Setup SNP and TM reserved fields */
	immu->immu_SNP_reserved = immu_regs_is_SNP_reserved(immu);
	immu->immu_TM_reserved = immu_regs_is_TM_reserved(immu);

	if (IMMU_ECAP_GET_CH(immu->immu_regs_excap) && immu_use_tm)
		immu->immu_ptemask = PDTE_MASK_TM;
	else
		immu->immu_ptemask = 0;

	/*
	 * Check for Mobile 4 series chipset
	 */
	if (immu_quirk_mobile4 == B_TRUE &&
	    !IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
		ddi_err(DER_LOG, NULL,
		    "IMMU: Mobile 4 chipset quirk detected. "
		    "Force-setting RWBF");
		IMMU_CAP_SET_RWBF(immu->immu_regs_cap);
	}

	/*
	 * retrieve the maximum number of domains
	 */
	immu->immu_max_domains = IMMU_CAP_ND(immu->immu_regs_cap);

	/*
	 * calculate the agaw
	 */
	if (set_agaw(immu) != DDI_SUCCESS) {
		ddi_regs_map_free(&(immu->immu_regs_handle));
		mutex_destroy(&(immu->immu_regs_lock));
		return (DDI_FAILURE);
	}
	immu->immu_regs_cmdval = 0;

	immu->immu_flushops = &immu_regs_flushops;

	return (DDI_SUCCESS);
}
/* ############### Functions exported ################## */
/*
 * immu_regs_setup()
 *   Setup mappings to an IMMU unit's registers
 *   so that they can be read/written
 */
void
immu_regs_setup(list_t *listp)
{
	int i;
	immu_t *immu;

	for (i = 0; i < IMMU_MAXSEG; i++) {
		immu = list_head(listp);
		for (; immu; immu = list_next(listp, immu)) {
			/* do your best, continue on error */
			if (setup_regs(immu) != DDI_SUCCESS) {
				immu->immu_regs_setup = B_FALSE;
			} else {
				immu->immu_regs_setup = B_TRUE;
			}
		}
	}
}
/*
 * immu_regs_resume()
 *   restore an IMMU unit's register state after suspend
 */
int
immu_regs_resume(immu_t *immu)
{
	int error;

	/*
	 * remap the register address space
	 */
	error = ddi_regs_map_setup(immu->immu_dip, 0,
	    (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
	    (offset_t)IMMU_REGSZ, &immu_regs_attr,
	    &(immu->immu_regs_handle));
	if (error != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	immu_regs_set_root_table(immu);

	immu_regs_intr_enable(immu, immu->immu_regs_intr_msi_addr,
	    immu->immu_regs_intr_msi_data, immu->immu_regs_intr_uaddr);

	(void) immu_intr_handler(immu);

	immu_regs_intrmap_enable(immu, immu->immu_intrmap_irta_reg);

	immu_regs_qinv_enable(immu, immu->immu_qinv_reg_value);

	return (error);
}
/*
 * immu_regs_suspend()
 */
void
immu_regs_suspend(immu_t *immu)
{
	immu->immu_intrmap_running = B_FALSE;

	/* Finally, unmap the regs */
	ddi_regs_map_free(&(immu->immu_regs_handle));
}
/*
 * immu_regs_startup()
 *   set an IMMU unit's registers to startup the unit
 */
void
immu_regs_startup(immu_t *immu)
{
	uint32_t status;

	if (immu->immu_regs_setup == B_FALSE) {
		return;
	}

	mutex_enter(&(immu->immu_regs_lock));
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_TE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_TES), status);
	immu->immu_regs_cmdval |= IMMU_GCMD_TE;
	immu->immu_regs_running = B_TRUE;
	mutex_exit(&(immu->immu_regs_lock));

	ddi_err(DER_NOTE, NULL, "%s running", immu->immu_name);
}
/*
 * immu_regs_shutdown()
 *   disable translation on an IMMU unit
 */
void
immu_regs_shutdown(immu_t *immu)
{
	uint32_t status;

	if (immu->immu_regs_running == B_FALSE) {
		return;
	}

	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_regs_cmdval &= ~IMMU_GCMD_TE;
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, !(status & IMMU_GSTS_TES), status);
	immu->immu_regs_running = B_FALSE;
	mutex_exit(&(immu->immu_regs_lock));

	ddi_err(DER_NOTE, NULL, "IOMMU %s stopped", immu->immu_name);
}
/*
 * immu_regs_intr_enable()
 *   Set an IMMU unit's regs to set up the unit's
 *   fault-event interrupt (MSI address/data)
 */
void
immu_regs_intr_enable(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
    uint32_t uaddr)
{
	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_regs_intr_msi_addr = msi_addr;
	immu->immu_regs_intr_uaddr = uaddr;
	immu->immu_regs_intr_msi_data = msi_data;
	put_reg32(immu, IMMU_REG_FEVNT_ADDR, msi_addr);
	put_reg32(immu, IMMU_REG_FEVNT_UADDR, uaddr);
	put_reg32(immu, IMMU_REG_FEVNT_DATA, msi_data);
	put_reg32(immu, IMMU_REG_FEVNT_CON, 0);
	mutex_exit(&(immu->immu_regs_lock));
}
/*
 * immu_regs_passthru_supported()
 *   Returns B_TRUE if passthru is supported
 */
boolean_t
immu_regs_passthru_supported(immu_t *immu)
{
	if (IMMU_ECAP_GET_PT(immu->immu_regs_excap)) {
		return (B_TRUE);
	}

	ddi_err(DER_WARN, NULL, "Passthru not supported");
	return (B_FALSE);
}
/*
 * immu_regs_is_TM_reserved()
 *   Returns B_TRUE if TM field is reserved
 */
boolean_t
immu_regs_is_TM_reserved(immu_t *immu)
{
	if (IMMU_ECAP_GET_DI(immu->immu_regs_excap) ||
	    IMMU_ECAP_GET_CH(immu->immu_regs_excap)) {
		return (B_FALSE);
	}
	return (B_TRUE);
}
/*
 * immu_regs_is_SNP_reserved()
 *   Returns B_TRUE if SNP field is reserved
 */
boolean_t
immu_regs_is_SNP_reserved(immu_t *immu)
{
	return (IMMU_ECAP_GET_SC(immu->immu_regs_excap) ? B_FALSE : B_TRUE);
}
/*
 * immu_regs_wbf_flush()
 *   If required and supported, write to the IMMU
 *   unit's regs to flush DMA write buffer(s)
 */
void
immu_regs_wbf_flush(immu_t *immu)
{
	uint32_t status;

	if (!IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
		return;
	}

	mutex_enter(&(immu->immu_regs_lock));
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_WBF);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (!(status & IMMU_GSTS_WBFS)), status);
	mutex_exit(&(immu->immu_regs_lock));
}
/*
 * immu_regs_cpu_flush()
 *   flush the cpu cache line after CPU memory writes, so
 *   the IOMMU can see the writes
 */
void
immu_regs_cpu_flush(immu_t *immu, caddr_t addr, uint_t size)
{
	uintptr_t startline, endline;

	if (immu->immu_dvma_coherent == B_TRUE)
		return;

	startline = (uintptr_t)addr & ~(uintptr_t)(x86_clflush_size - 1);
	endline = ((uintptr_t)addr + size - 1) &
	    ~(uintptr_t)(x86_clflush_size - 1);
	while (startline <= endline) {
		clflush_insn((caddr_t)startline);
		startline += x86_clflush_size;
	}

	mfence_insn();
}
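/*
 * Worked example (illustrative, not from the original file, assuming
 * x86_clflush_size == 64): for addr = 0x1030 and size = 0x90, startline is
 * rounded down to 0x1000 and endline to 0x1080, so cache lines 0x1000,
 * 0x1040 and 0x1080 are flushed, covering bytes 0x1030..0x10bf.
 */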
/*
 * context_flush()
 *   flush the context cache
 */
static void
context_flush(immu_t *immu, uint8_t function_mask,
    uint16_t sid, uint_t did, immu_context_inv_t type)
{
	uint64_t command = 0;
	uint64_t status;

	switch (type) {
	case CONTEXT_FSI:
		command |= CCMD_INV_ICC | CCMD_INV_DEVICE
		    | CCMD_INV_DID(did)
		    | CCMD_INV_SID(sid) | CCMD_INV_FM(function_mask);
		break;
	case CONTEXT_DSI:
		command |= CCMD_INV_ICC | CCMD_INV_DOMAIN
		    | CCMD_INV_DID(did);
		break;
	case CONTEXT_GLOBAL:
		command |= CCMD_INV_ICC | CCMD_INV_GLOBAL;
		break;
	default:
		ddi_err(DER_PANIC, NULL,
		    "%s: incorrect context cache flush type",
		    immu->immu_name);
		/*NOTREACHED*/
	}

	mutex_enter(&(immu->immu_regs_lock));
	put_reg64(immu, IMMU_REG_CONTEXT_CMD, command);
	wait_completion(immu, IMMU_REG_CONTEXT_CMD, get_reg64,
	    (!(status & CCMD_INV_ICC)), status);
	mutex_exit(&(immu->immu_regs_lock));
}
void
immu_regs_context_fsi(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, immu_inv_wait_t *iwp)
{
	context_flush(immu, function_mask, source_id, domain_id, CONTEXT_FSI);
}
void
immu_regs_context_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
	context_flush(immu, 0, 0, domain_id, CONTEXT_DSI);
}
void
immu_regs_context_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
	context_flush(immu, 0, 0, 0, CONTEXT_GLOBAL);
}
/*
 * Nothing to do, all register operations are synchronous.
 */
static void
immu_regs_inv_wait(immu_inv_wait_t *iwp)
{
}
void
immu_regs_set_root_table(immu_t *immu)
{
	uint32_t status;

	mutex_enter(&(immu->immu_regs_lock));
	put_reg64(immu, IMMU_REG_ROOTENTRY,
	    immu->immu_ctx_root->hwpg_paddr);
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_SRTP);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_RTPS), status);
	mutex_exit(&(immu->immu_regs_lock));
}
/* enable queued invalidation interface */
void
immu_regs_qinv_enable(immu_t *immu, uint64_t qinv_reg_value)
{
	uint32_t status;

	if (immu_qinv_enable == B_FALSE)
		return;

	mutex_enter(&immu->immu_regs_lock);
	immu->immu_qinv_reg_value = qinv_reg_value;
	/* Initialize the Invalidation Queue Tail register to zero */
	put_reg64(immu, IMMU_REG_INVAL_QT, 0);

	/* set invalidation queue base address register */
	put_reg64(immu, IMMU_REG_INVAL_QAR, qinv_reg_value);

	/* enable queued invalidation interface */
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_QIE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_QIES), status);
	mutex_exit(&immu->immu_regs_lock);

	immu->immu_regs_cmdval |= IMMU_GCMD_QIE;
	immu->immu_qinv_running = B_TRUE;
}
/* enable interrupt remapping hardware unit */
void
immu_regs_intrmap_enable(immu_t *immu, uint64_t irta_reg)
{
	uint32_t status;

	if (immu_intrmap_enable == B_FALSE)
		return;

	/* set interrupt remap table pointer */
	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_intrmap_irta_reg = irta_reg;
	put_reg64(immu, IMMU_REG_IRTAR, irta_reg);
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_SIRTP);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_IRTPS), status);
	mutex_exit(&(immu->immu_regs_lock));

	/* global flush intr entry cache */
	immu_qinv_intr_global(immu, &immu->immu_intrmap_inv_wait);

	/* enable interrupt remapping */
	mutex_enter(&(immu->immu_regs_lock));
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_IRE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_IRES),
	    status);
	immu->immu_regs_cmdval |= IMMU_GCMD_IRE;

	/* set compatible mode */
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_CFI);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_CFIS),
	    status);
	immu->immu_regs_cmdval |= IMMU_GCMD_CFI;
	mutex_exit(&(immu->immu_regs_lock));

	immu->immu_intrmap_running = B_TRUE;
}
uint64_t
immu_regs_get64(immu_t *immu, uint_t reg)
{
	return (get_reg64(immu, reg));
}

uint32_t
immu_regs_get32(immu_t *immu, uint_t reg)
{
	return (get_reg32(immu, reg));
}

void
immu_regs_put64(immu_t *immu, uint_t reg, uint64_t val)
{
	put_reg64(immu, reg, val);
}

void
immu_regs_put32(immu_t *immu, uint_t reg, uint32_t val)
{
	put_reg32(immu, reg, val);
}