 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Portions Copyright (c) 2010, Oracle and/or its affiliates.
 * All rights reserved.
 */

/*
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/archsystm.h>
#include <vm/hat_i86.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/immu.h>
/* invalidation queue table entry size */
#define QINV_ENTRY_SIZE         0x10

/* max value of Queue Size field of Invalidation Queue Address Register */
#define QINV_MAX_QUEUE_SIZE     0x7

/* status data size of invalidation wait descriptor */
#define QINV_SYNC_DATA_SIZE     0x4

/* invalidation queue head and tail */
#define QINV_IQA_HEAD(QH)       BITX((QH), 18, 4)
#define QINV_IQA_TAIL_SHIFT     4
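
/*
 * The head and tail registers hold byte offsets into the queue.  Each
 * descriptor is QINV_ENTRY_SIZE (16) bytes, so the entry index occupies
 * bits 18:4 -- which is what QINV_IQA_HEAD() extracts, and why the
 * software tail index is shifted left by QINV_IQA_TAIL_SHIFT before it
 * is written to the tail register.
 */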
/* invalidation queue entry structure */
typedef struct qinv_inv_dsc {
        uint64_t        lo;
        uint64_t        hi;
} qinv_dsc_t;
/* physically contiguous pages for the invalidation queue */
typedef struct qinv_mem {
        kmutex_t           qinv_mem_lock;
        ddi_dma_handle_t   qinv_mem_dma_hdl;
        ddi_acc_handle_t   qinv_mem_acc_hdl;
        caddr_t            qinv_mem_vaddr;
        paddr_t            qinv_mem_paddr;
        uint_t             qinv_mem_size;       /* number of entries */
        uint16_t           qinv_mem_head;
        uint16_t           qinv_mem_tail;
} qinv_mem_t;
/*
 * invalidation queue state
 * This structure describes the state information of the
 * invalidation queue table and the related status memory for
 * the invalidation wait descriptor
 *
 * qinv_table - invalidation queue table
 * qinv_sync  - sync status memory for invalidation wait descriptor
 */
typedef struct qinv {
        qinv_mem_t      qinv_table;
        qinv_mem_t      qinv_sync;
} qinv_t;
static void immu_qinv_inv_wait(immu_inv_wait_t *iwp);

static struct immu_flushops immu_qinv_flushops = {
        immu_qinv_context_fsi,
        immu_qinv_context_dsi,
        immu_qinv_context_gbl,
        immu_qinv_iotlb_psi,
        immu_qinv_iotlb_dsi,
        immu_qinv_iotlb_gbl,
        immu_qinv_inv_wait
};
/* helper macros for making queued invalidation descriptors */
#define INV_DSC_TYPE(dsc)       ((dsc)->lo & 0xF)

/* context cache invalidate descriptor (type 0x1) */
#define CC_INV_DSC_HIGH         (0)
#define CC_INV_DSC_LOW(fm, sid, did, g) (((uint64_t)(fm) << 48) | \
        ((uint64_t)(sid) << 32) | \
        ((uint64_t)(did) << 16) | \
        ((uint64_t)(g) << 4) | \
        1)

/* iotlb invalidate descriptor (type 0x2) */
#define IOTLB_INV_DSC_HIGH(addr, ih, am) (((uint64_t)(addr)) | \
        ((uint64_t)(ih) << 6) | \
        ((uint64_t)(am)))

#define IOTLB_INV_DSC_LOW(did, dr, dw, g) (((uint64_t)(did) << 16) | \
        ((uint64_t)(dr) << 7) | \
        ((uint64_t)(dw) << 6) | \
        ((uint64_t)(g) << 4) | \
        2)

/* device-iotlb invalidate descriptor (type 0x3) */
#define DEV_IOTLB_INV_DSC_HIGH(addr, s) (((uint64_t)(addr)) | (s))

#define DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd) ( \
        ((uint64_t)(sid) << 32) | \
        ((uint64_t)(max_invs_pd) << 16) | \
        3)

/* interrupt entry cache invalidate descriptor (type 0x4) */
#define IEC_INV_DSC_HIGH        (0)
#define IEC_INV_DSC_LOW(idx, im, g)     (((uint64_t)(idx) << 32) | \
        ((uint64_t)(im) << 27) | \
        ((uint64_t)(g) << 4) | \
        4)

/* invalidation wait descriptor (type 0x5) */
#define INV_WAIT_DSC_HIGH(saddr)        ((uint64_t)(saddr))

#define INV_WAIT_DSC_LOW(sdata, fn, sw, iflag)  (((uint64_t)(sdata) << 32) | \
        ((uint64_t)(fn) << 6) | \
        ((uint64_t)(sw) << 5) | \
        ((uint64_t)(iflag) << 4) | \
        5)
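
/*
 * Each descriptor is two 64-bit words (QINV_ENTRY_SIZE bytes in the
 * queue).  Bits 3:0 of the low word carry the descriptor type code,
 * which INV_DSC_TYPE() extracts and immu_qinv_report_fault() uses to
 * index qinv_dsc_type[] below.
 */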
/*
 * QS field of the Invalidation Queue Address Register
 * the size of the invalidation queue is 1 << (qinv_iqa_qs + 8)
 */
static uint_t qinv_iqa_qs = 6;
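
/*
 * With the default qinv_iqa_qs of 6 the queue holds 1 << (6 + 8) = 16384
 * descriptors, i.e. 16384 * QINV_ENTRY_SIZE = 256KB of queue memory.
 * QINV_MAX_QUEUE_SIZE (0x7) caps this at 32768 descriptors (512KB).
 */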
/*
 * the invalidation descriptor types of the queued invalidation interface
 */
static char *qinv_dsc_type[] = {
        "Reserved",
        "Context Cache Invalidate Descriptor",
        "IOTLB Invalidate Descriptor",
        "Device-IOTLB Invalidate Descriptor",
        "Interrupt Entry Cache Invalidate Descriptor",
        "Invalidation Wait Descriptor",
        "Incorrect queue invalidation type"
};

#define QINV_MAX_DSC_TYPE       (sizeof (qinv_dsc_type) / sizeof (char *))
/*
 * the queued invalidation interface functions
 */
static void qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc);
static void qinv_context_common(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, ctt_inv_g_t type);
static void qinv_iotlb_common(immu_t *immu, uint_t domain_id,
    uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type);
static void qinv_iec_common(immu_t *immu, uint_t iidx,
    uint_t im, uint_t g);
static void immu_qinv_inv_wait(immu_inv_wait_t *iwp);
static void qinv_wait_sync(immu_t *immu, immu_inv_wait_t *iwp);
static void qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
    uint64_t addr, uint_t size, uint_t max_invs_pd);
/* submit invalidation request descriptor to invalidation queue */
static void
qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc)
{
        qinv_t *qinv;
        qinv_mem_t *qinv_table;
        uint_t tail;
        uint_t count = 0;

        qinv = (qinv_t *)immu->immu_qinv;
        qinv_table = &(qinv->qinv_table);

        mutex_enter(&qinv_table->qinv_mem_lock);
        tail = qinv_table->qinv_mem_tail;
        qinv_table->qinv_mem_tail++;

        if (qinv_table->qinv_mem_tail == qinv_table->qinv_mem_size)
                qinv_table->qinv_mem_tail = 0;

        while (qinv_table->qinv_mem_head == qinv_table->qinv_mem_tail) {
                count++;
                /*
                 * the invalidation queue is full, wait for the hardware
                 * to fetch more descriptors and advance the head
                 */
                qinv_table->qinv_mem_head = QINV_IQA_HEAD(
                    immu_regs_get64(immu, IMMU_REG_INVAL_QH));
        }

        IMMU_DPROBE3(immu__qinv__sub, uint64_t, dsc->lo, uint64_t, dsc->hi,
            uint_t, count);

        bcopy(dsc, qinv_table->qinv_mem_vaddr + tail * QINV_ENTRY_SIZE,
            QINV_ENTRY_SIZE);

        immu_regs_put64(immu, IMMU_REG_INVAL_QT,
            qinv_table->qinv_mem_tail << QINV_IQA_TAIL_SHIFT);

        mutex_exit(&qinv_table->qinv_mem_lock);
}
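
/*
 * The queue is a software-producer/hardware-consumer ring: software owns
 * the tail, hardware owns the head.  qinv_submit_inv_dsc() claims a slot
 * by advancing the cached tail, spins (re-reading the head register)
 * while the ring is full, copies the descriptor into the claimed slot,
 * and only then publishes the new tail through IMMU_REG_INVAL_QT so the
 * hardware will fetch it.
 */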
/* queued invalidation interface -- invalidate context cache */
static void
qinv_context_common(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, ctt_inv_g_t type)
{
        qinv_dsc_t dsc;

        dsc.lo = CC_INV_DSC_LOW(function_mask, source_id, domain_id, type);
        dsc.hi = CC_INV_DSC_HIGH;

        qinv_submit_inv_dsc(immu, &dsc);
}
/* queued invalidation interface -- invalidate iotlb */
static void
qinv_iotlb_common(immu_t *immu, uint_t domain_id,
    uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type)
{
        qinv_dsc_t dsc;
        uint8_t dr = 0;
        uint8_t dw = 0;

        if (IMMU_CAP_GET_DRD(immu->immu_regs_cap))
                dr = 1;
        if (IMMU_CAP_GET_DWD(immu->immu_regs_cap))
                dw = 1;

        switch (type) {
        case TLB_INV_G_PAGE:
                if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap) ||
                    am > IMMU_CAP_GET_MAMV(immu->immu_regs_cap) ||
                    addr & IMMU_PAGEOFFSET) {
                        type = TLB_INV_G_DOMAIN;
                        goto qinv_ignore_psi;
                }
                dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
                dsc.hi = IOTLB_INV_DSC_HIGH(addr, hint, am);
                break;

        qinv_ignore_psi:
        case TLB_INV_G_DOMAIN:
                dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
                dsc.hi = 0;
                break;

        case TLB_INV_G_GLOBAL:
                dsc.lo = IOTLB_INV_DSC_LOW(0, dr, dw, type);
                dsc.hi = 0;
                break;

        default:
                ddi_err(DER_WARN, NULL, "incorrect iotlb flush type");
                return;
        }

        qinv_submit_inv_dsc(immu, &dsc);
}
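
/*
 * dr/dw above request read/write draining when the hardware advertises
 * the corresponding capability (DRD/DWD).  A page-selective request
 * silently degrades to a domain-selective flush if PSI is unsupported,
 * the address mask exceeds MAMV, or the address is not page aligned.
 */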
/* queued invalidation interface -- invalidate dev_iotlb */
static void
qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
    uint64_t addr, uint_t size, uint_t max_invs_pd)
{
        qinv_dsc_t dsc;

        dsc.lo = DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd);
        dsc.hi = DEV_IOTLB_INV_DSC_HIGH(addr, size);

        qinv_submit_inv_dsc(immu, &dsc);
}
/* queued invalidation interface -- invalidate interrupt entry cache */
static void
qinv_iec_common(immu_t *immu, uint_t iidx, uint_t im, uint_t g)
{
        qinv_dsc_t dsc;

        dsc.lo = IEC_INV_DSC_LOW(iidx, im, g);
        dsc.hi = IEC_INV_DSC_HIGH;

        qinv_submit_inv_dsc(immu, &dsc);
}
/*
 * queued invalidation interface -- invalidation wait descriptor
 * wait until the invalidation request has finished
 */
static void
qinv_wait_sync(immu_t *immu, immu_inv_wait_t *iwp)
{
        qinv_dsc_t dsc;
        volatile uint32_t *status;
        uint64_t paddr;
#ifdef DEBUG
        uint_t count;
#endif

        status = &iwp->iwp_vstatus;
        paddr = iwp->iwp_pstatus;

        *status = IMMU_INV_DATA_PENDING;

        /*
         * sdata = IMMU_INV_DATA_DONE, fence = 1, sw = 1, if = 0
         * indicate the invalidation wait descriptor completion by
         * performing a coherent DWORD write to the status address,
         * not by generating an invalidation completion event
         */
        dsc.lo = INV_WAIT_DSC_LOW(IMMU_INV_DATA_DONE, 1, 1, 0);
        dsc.hi = INV_WAIT_DSC_HIGH(paddr);

        qinv_submit_inv_dsc(immu, &dsc);

#ifdef DEBUG
        count = 0;
        while (*status != IMMU_INV_DATA_DONE) {
                count++;
                ht_pause();
        }
        DTRACE_PROBE2(immu__wait__sync, const char *, iwp->iwp_name,
            uint_t, count);
#else
        while (*status != IMMU_INV_DATA_DONE)
                ht_pause();
#endif
}
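
/*
 * Completion model: the caller's status word is set to
 * IMMU_INV_DATA_PENDING, a wait descriptor is queued with SW = 1 so the
 * hardware performs a coherent DWORD write of IMMU_INV_DATA_DONE to
 * iwp_pstatus once all earlier descriptors have been processed, and the
 * CPU then spins on the virtual mapping of that same word.
 */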
/* wait for a previously queued invalidation wait descriptor to complete */
static void
immu_qinv_inv_wait(immu_inv_wait_t *iwp)
{
        volatile uint32_t *status = &iwp->iwp_vstatus;
#ifdef DEBUG
        uint_t count;

        count = 0;
        while (*status != IMMU_INV_DATA_DONE) {
                count++;
                ht_pause();
        }
        DTRACE_PROBE2(immu__wait__async, const char *, iwp->iwp_name,
            uint_t, count);
#else
        while (*status != IMMU_INV_DATA_DONE)
                ht_pause();
#endif
}
/*
 * call ddi_dma_mem_alloc to allocate physically contiguous
 * pages for the invalidation queue table
 */
static int
qinv_setup(immu_t *immu)
{
        qinv_t *qinv;
        size_t size;

        ddi_dma_attr_t qinv_dma_attr = {
                DMA_ATTR_V0,
                0U,                     /* dma_attr_addr_lo */
                0xffffffffffffffffULL,  /* dma_attr_addr_hi */
                0xffffffffU,            /* dma_attr_count_max */
                MMU_PAGESIZE,           /* dma_attr_align, page aligned */
                0x1,                    /* dma_attr_burstsizes */
                0x1,                    /* dma_attr_minxfer */
                0xffffffffU,            /* dma_attr_maxxfer */
                0xffffffffffffffffULL,  /* dma_attr_seg */
                1,                      /* dma_attr_sgllen */
                4,                      /* dma_attr_granular */
                0                       /* dma_attr_flags */
        };

        ddi_device_acc_attr_t qinv_acc_attr = {
                DDI_DEVICE_ATTR_V0,
                DDI_NEVERSWAP_ACC,
                DDI_STRICTORDER_ACC
        };
        mutex_init(&(immu->immu_qinv_lock), NULL, MUTEX_DRIVER, NULL);

        mutex_enter(&(immu->immu_qinv_lock));

        immu->immu_qinv = NULL;
        if (!IMMU_ECAP_GET_QI(immu->immu_regs_excap) ||
            immu_qinv_enable == B_FALSE) {
                mutex_exit(&(immu->immu_qinv_lock));
                return (DDI_SUCCESS);
        }

        if (qinv_iqa_qs > QINV_MAX_QUEUE_SIZE)
                qinv_iqa_qs = QINV_MAX_QUEUE_SIZE;

        qinv = kmem_zalloc(sizeof (qinv_t), KM_SLEEP);
        if (ddi_dma_alloc_handle(root_devinfo,
            &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
            &(qinv->qinv_table.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
                ddi_err(DER_WARN, root_devinfo,
                    "alloc invalidation queue table handle failed");
                goto queue_table_handle_failed;
        }

        if (ddi_dma_alloc_handle(root_devinfo,
            &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
            &(qinv->qinv_sync.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
                ddi_err(DER_WARN, root_devinfo,
                    "alloc invalidation queue sync mem handle failed");
                goto sync_table_handle_failed;
        }

        qinv->qinv_table.qinv_mem_size = (1 << (qinv_iqa_qs + 8));
        size = qinv->qinv_table.qinv_mem_size * QINV_ENTRY_SIZE;
        /* alloc physically contiguous pages for the invalidation queue */
        if (ddi_dma_mem_alloc(qinv->qinv_table.qinv_mem_dma_hdl,
            size,
            &qinv_acc_attr,
            DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
            DDI_DMA_SLEEP,
            NULL,
            &(qinv->qinv_table.qinv_mem_vaddr),
            &size,
            &(qinv->qinv_table.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
                ddi_err(DER_WARN, root_devinfo,
                    "alloc invalidation queue table failed");
                goto queue_table_mem_failed;
        }

        ASSERT(!((uintptr_t)qinv->qinv_table.qinv_mem_vaddr & MMU_PAGEOFFSET));
        bzero(qinv->qinv_table.qinv_mem_vaddr, size);
        /* get the base physical address of the invalidation request queue */
        qinv->qinv_table.qinv_mem_paddr = pfn_to_pa(
            hat_getpfnum(kas.a_hat, qinv->qinv_table.qinv_mem_vaddr));

        qinv->qinv_table.qinv_mem_head = qinv->qinv_table.qinv_mem_tail = 0;

        qinv->qinv_sync.qinv_mem_size = qinv->qinv_table.qinv_mem_size;
        size = qinv->qinv_sync.qinv_mem_size * QINV_SYNC_DATA_SIZE;
        /* alloc status memory for invalidation wait descriptors */
        if (ddi_dma_mem_alloc(qinv->qinv_sync.qinv_mem_dma_hdl,
            size,
            &qinv_acc_attr,
            DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
            DDI_DMA_SLEEP,
            NULL,
            &(qinv->qinv_sync.qinv_mem_vaddr),
            &size,
            &(qinv->qinv_sync.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
                ddi_err(DER_WARN, root_devinfo,
                    "alloc invalidation queue sync mem failed");
                goto sync_table_mem_failed;
        }

        ASSERT(!((uintptr_t)qinv->qinv_sync.qinv_mem_vaddr & MMU_PAGEOFFSET));
        bzero(qinv->qinv_sync.qinv_mem_vaddr, size);
        qinv->qinv_sync.qinv_mem_paddr = pfn_to_pa(
            hat_getpfnum(kas.a_hat, qinv->qinv_sync.qinv_mem_vaddr));

        qinv->qinv_sync.qinv_mem_head = qinv->qinv_sync.qinv_mem_tail = 0;

        mutex_init(&(qinv->qinv_table.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);
        mutex_init(&(qinv->qinv_sync.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);

        immu->immu_qinv = qinv;

        mutex_exit(&(immu->immu_qinv_lock));

        return (DDI_SUCCESS);
sync_table_mem_failed:
        ddi_dma_mem_free(&(qinv->qinv_table.qinv_mem_acc_hdl));

queue_table_mem_failed:
        ddi_dma_free_handle(&(qinv->qinv_sync.qinv_mem_dma_hdl));

sync_table_handle_failed:
        ddi_dma_free_handle(&(qinv->qinv_table.qinv_mem_dma_hdl));

queue_table_handle_failed:
        kmem_free(qinv, sizeof (qinv_t));

        mutex_exit(&(immu->immu_qinv_lock));

        return (DDI_FAILURE);
}
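
/*
 * The error labels above unwind in reverse order of setup: each goto
 * target frees only what had been successfully allocated before the
 * failing step, ending with the qinv_t allocation itself.
 */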
/*
 * ###########################################################################
 *
 * Functions exported by immu_qinv.c
 *
 * ###########################################################################
 */
/*
 * initialize invalidation request queue structure.
 */
int
immu_qinv_setup(list_t *listp)
{
        immu_t *immu;
        int nerr;

        if (immu_qinv_enable == B_FALSE) {
                return (DDI_FAILURE);
        }

        nerr = 0;
        immu = list_head(listp);
        for (; immu; immu = list_next(listp, immu)) {
                if (qinv_setup(immu) == DDI_SUCCESS) {
                        immu->immu_qinv_setup = B_TRUE;
                } else {
                        nerr++;
                }
        }

        return (nerr > 0 ? DDI_FAILURE : DDI_SUCCESS);
}
void
immu_qinv_startup(immu_t *immu)
{
        qinv_t *qinv;
        uint64_t qinv_reg_value;

        if (immu->immu_qinv_setup == B_FALSE) {
                return;
        }

        qinv = (qinv_t *)immu->immu_qinv;
        qinv_reg_value = qinv->qinv_table.qinv_mem_paddr | qinv_iqa_qs;
        immu_regs_qinv_enable(immu, qinv_reg_value);
        immu->immu_flushops = &immu_qinv_flushops;
        immu->immu_qinv_running = B_TRUE;
}
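
/*
 * The value programmed into the queue address register is simply the
 * page-aligned physical base of the queue OR'ed with the QS field in its
 * low bits; qinv_setup() asserts that the base is page aligned, so the
 * two never overlap.
 */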
/*
 * queued invalidation interface
 * function based context cache invalidation
 */
void
immu_qinv_context_fsi(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, immu_inv_wait_t *iwp)
{
        qinv_context_common(immu, function_mask, source_id,
            domain_id, CTT_INV_G_DEVICE);
        qinv_wait_sync(immu, iwp);
}
/*
 * queued invalidation interface
 * domain based context cache invalidation
 */
void
immu_qinv_context_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
        qinv_context_common(immu, 0, 0, domain_id, CTT_INV_G_DOMAIN);
        qinv_wait_sync(immu, iwp);
}
/*
 * queued invalidation interface
 * global context cache invalidation
 */
void
immu_qinv_context_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
        qinv_context_common(immu, 0, 0, 0, CTT_INV_G_GLOBAL);
        qinv_wait_sync(immu, iwp);
}
/*
 * queued invalidation interface
 * page based iotlb invalidation
 */
void
immu_qinv_iotlb_psi(immu_t *immu, uint_t domain_id,
    uint64_t dvma, uint_t count, uint_t hint, immu_inv_wait_t *iwp)
{
        uint_t am = 1;
        uint_t max_am;

        max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);

        /* choose page specified invalidation */
        if (IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
                while (am <= max_am) {
                        if ((ADDR_AM_OFFSET(IMMU_BTOP(dvma), am) + count)
                            <= ADDR_AM_MAX(am)) {
                                qinv_iotlb_common(immu, domain_id,
                                    dvma, am, hint, TLB_INV_G_PAGE);
                                break;
                        }
                        am++;
                }
                if (am > max_am) {
                        qinv_iotlb_common(immu, domain_id,
                            dvma, 0, hint, TLB_INV_G_DOMAIN);
                }

        /* choose domain invalidation */
        } else {
                qinv_iotlb_common(immu, domain_id, dvma,
                    0, hint, TLB_INV_G_DOMAIN);
        }

        qinv_wait_sync(immu, iwp);
}
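
/*
 * The loop above searches for the smallest address mask that covers the
 * request.  Assuming ADDR_AM_OFFSET() yields the page offset within the
 * 2^am aligned window and ADDR_AM_MAX() is 2^am, flushing 16 pages that
 * start on a 16-page boundary is satisfied at am = 4 and becomes a single
 * PSI descriptor; if no am up to the hardware's MAMV covers the range,
 * the code falls back to a domain-selective flush.
 */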
/*
 * queued invalidation interface
 * domain based iotlb invalidation
 */
void
immu_qinv_iotlb_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
        qinv_iotlb_common(immu, domain_id, 0, 0, 0, TLB_INV_G_DOMAIN);
        qinv_wait_sync(immu, iwp);
}
/*
 * queued invalidation interface
 * global iotlb invalidation
 */
void
immu_qinv_iotlb_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
        qinv_iotlb_common(immu, 0, 0, 0, 0, TLB_INV_G_GLOBAL);
        qinv_wait_sync(immu, iwp);
}
/* queued invalidation interface -- globally invalidate interrupt entry cache */
void
immu_qinv_intr_global(immu_t *immu, immu_inv_wait_t *iwp)
{
        qinv_iec_common(immu, 0, 0, IEC_INV_GLOBAL);
        qinv_wait_sync(immu, iwp);
}
/* queued invalidation interface -- invalidate a single interrupt entry cache */
void
immu_qinv_intr_one_cache(immu_t *immu, uint_t iidx, immu_inv_wait_t *iwp)
{
        qinv_iec_common(immu, iidx, 0, IEC_INV_INDEX);
        qinv_wait_sync(immu, iwp);
}
/* queued invalidation interface -- invalidate interrupt entry caches */
void
immu_qinv_intr_caches(immu_t *immu, uint_t iidx, uint_t cnt,
    immu_inv_wait_t *iwp)
{
        uint_t i;
        uint_t mask = 0;

        /* requested interrupt count is not a power of 2 */
        if (!ISP2(cnt)) {
                for (i = 0; i < cnt; i++) {
                        qinv_iec_common(immu, iidx + i, 0, IEC_INV_INDEX);
                }
                qinv_wait_sync(immu, iwp);
                return;
        }

        while ((2 << mask) < cnt) {
                mask++;
        }

        if (mask > IMMU_ECAP_GET_MHMV(immu->immu_regs_excap)) {
                for (i = 0; i < cnt; i++) {
                        qinv_iec_common(immu, iidx + i, 0, IEC_INV_INDEX);
                }
                qinv_wait_sync(immu, iwp);
                return;
        }

        qinv_iec_common(immu, iidx, mask, IEC_INV_INDEX);

        qinv_wait_sync(immu, iwp);
}
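
/*
 * For a power-of-two request the loop above picks the smallest mask with
 * (2 << mask) >= cnt and issues a single masked IEC invalidation, letting
 * the im field cover the whole range; if that mask exceeds the hardware's
 * Maximum Handle Mask Value (MHMV), or cnt is not a power of two, the
 * entries are invalidated one index at a time instead.
 */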
/* report a fault from the invalidation queue */
void
immu_qinv_report_fault(immu_t *immu)
{
        uint16_t head;
        qinv_dsc_t *dsc;
        qinv_t *qinv;

        /* access qinv data */
        mutex_enter(&(immu->immu_qinv_lock));

        qinv = (qinv_t *)(immu->immu_qinv);

        head = QINV_IQA_HEAD(
            immu_regs_get64(immu, IMMU_REG_INVAL_QH));

        dsc = (qinv_dsc_t *)(qinv->qinv_table.qinv_mem_vaddr
            + (head * QINV_ENTRY_SIZE));

        /* report the error */
        ddi_err(DER_WARN, immu->immu_dip,
            "generated a fault when fetching a descriptor from the"
            "\tinvalidation queue, or detects that the fetched"
            "\tdescriptor is invalid. The head register is 0x%x,"
            "\tthe type is %s",
            head,
            qinv_dsc_type[MIN(INV_DSC_TYPE(dsc), QINV_MAX_DSC_TYPE - 1)]);

        mutex_exit(&(immu->immu_qinv_lock));
}