usr/src/uts/i86pc/io/immu_qinv.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Portions Copyright (c) 2010, Oracle and/or its affiliates.
 * All rights reserved.
 */

/*
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 */
#include <sys/ddi.h>
#include <sys/archsystm.h>
#include <vm/hat_i86.h>
#include <sys/types.h>
#include <sys/cpu.h>
#include <sys/sysmacros.h>
#include <sys/immu.h>
/* invalidation queue table entry size */
#define	QINV_ENTRY_SIZE		0x10

/* max value of Queue Size field of Invalidation Queue Address Register */
#define	QINV_MAX_QUEUE_SIZE	0x7

/* status data size of invalidation wait descriptor */
#define	QINV_SYNC_DATA_SIZE	0x4

/* invalidation queue head and tail */
#define	QINV_IQA_HEAD(QH)	BITX((QH), 18, 4)
#define	QINV_IQA_TAIL_SHIFT	4
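
/*
 * Each queue entry is 16 bytes, so the hardware head/tail registers
 * hold byte offsets whose low 4 bits are always zero: QINV_IQA_HEAD()
 * extracts the entry index from bits 18:4, and the tail index is
 * shifted left by 4 before being written back to the tail register.
 */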
/* invalidation queue entry structure */
typedef struct qinv_inv_dsc {
	uint64_t	lo;
	uint64_t	hi;
} qinv_dsc_t;
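
/*
 * Each descriptor is 128 bits; the low 4 bits of 'lo' carry the
 * descriptor type (see INV_DSC_TYPE()), and the remaining bits are
 * the type-specific fields built by the *_INV_DSC_LOW/HIGH macros.
 */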
/* physically contiguous pages for invalidation queue */
typedef struct qinv_mem {
	kmutex_t		qinv_mem_lock;
	ddi_dma_handle_t	qinv_mem_dma_hdl;
	ddi_acc_handle_t	qinv_mem_acc_hdl;
	caddr_t			qinv_mem_vaddr;
	paddr_t			qinv_mem_paddr;
	uint_t			qinv_mem_size;
	uint16_t		qinv_mem_head;
	uint16_t		qinv_mem_tail;
} qinv_mem_t;
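
/*
 * qinv_mem_t describes both the invalidation queue itself and the
 * status memory written by invalidation wait descriptors; head and
 * tail are entry indices into the region, not byte offsets.
 */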
/*
 * invalidation queue state
 * This structure describes the state information of the
 * invalidation queue table and related status memory for
 * invalidation wait descriptors
 *
 * qinv_table - invalidation queue table
 * qinv_sync - sync status memory for invalidation wait descriptors
 */
typedef struct qinv {
	qinv_mem_t	qinv_table;
	qinv_mem_t	qinv_sync;
} qinv_t;
static void immu_qinv_inv_wait(immu_inv_wait_t *iwp);

static struct immu_flushops immu_qinv_flushops = {
	immu_qinv_context_fsi,
	immu_qinv_context_dsi,
	immu_qinv_context_gbl,
	immu_qinv_iotlb_psi,
	immu_qinv_iotlb_dsi,
	immu_qinv_iotlb_gbl,
	immu_qinv_inv_wait
};
/* helper macros for making queued invalidation descriptors */
#define	INV_DSC_TYPE(dsc)	((dsc)->lo & 0xF)
#define	CC_INV_DSC_HIGH		(0)
#define	CC_INV_DSC_LOW(fm, sid, did, g)	(((uint64_t)(fm) << 48) | \
	((uint64_t)(sid) << 32) | \
	((uint64_t)(did) << 16) | \
	((uint64_t)(g) << 4) | \
	1)

#define	IOTLB_INV_DSC_HIGH(addr, ih, am) (((uint64_t)(addr)) | \
	((uint64_t)(ih) << 6) | \
	((uint64_t)(am)))

#define	IOTLB_INV_DSC_LOW(did, dr, dw, g) (((uint64_t)(did) << 16) | \
	((uint64_t)(dr) << 7) | \
	((uint64_t)(dw) << 6) | \
	((uint64_t)(g) << 4) | \
	2)

#define	DEV_IOTLB_INV_DSC_HIGH(addr, s) (((uint64_t)(addr)) | (s))

#define	DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd) ( \
	((uint64_t)(sid) << 32) | \
	((uint64_t)(max_invs_pd) << 16) | \
	3)

#define	IEC_INV_DSC_HIGH (0)
#define	IEC_INV_DSC_LOW(idx, im, g)	(((uint64_t)(idx) << 32) | \
	((uint64_t)(im) << 27) | \
	((uint64_t)(g) << 4) | \
	4)

#define	INV_WAIT_DSC_HIGH(saddr) ((uint64_t)(saddr))

#define	INV_WAIT_DSC_LOW(sdata, fn, sw, iflag) (((uint64_t)(sdata) << 32) | \
	((uint64_t)(fn) << 6) | \
	((uint64_t)(sw) << 5) | \
	((uint64_t)(iflag) << 4) | \
	5)
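
/*
 * The trailing constant in each *_INV_DSC_LOW macro is the descriptor
 * type encoded in bits 3:0 of the low 64 bits: 1 = context-cache,
 * 2 = IOTLB, 3 = device-IOTLB, 4 = interrupt entry cache and
 * 5 = invalidation wait, matching the qinv_dsc_type[] table below.
 */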
/*
 * QS field of Invalidation Queue Address Register
 * the size of the invalidation queue is 1 << (qinv_iqa_qs + 8)
 */
static uint_t qinv_iqa_qs = 6;
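
/*
 * With the default qinv_iqa_qs of 6, the queue holds
 * 1 << (6 + 8) = 16384 entries of QINV_ENTRY_SIZE (16) bytes,
 * i.e. 256KB, or 64 physically contiguous 4KB pages.
 */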
/*
 * the invalidation descriptor types of the queued invalidation interface
 */
static char *qinv_dsc_type[] = {
	"Reserved",
	"Context Cache Invalidate Descriptor",
	"IOTLB Invalidate Descriptor",
	"Device-IOTLB Invalidate Descriptor",
	"Interrupt Entry Cache Invalidate Descriptor",
	"Invalidation Wait Descriptor",
	"Incorrect queue invalidation type"
};

#define	QINV_MAX_DSC_TYPE	(sizeof (qinv_dsc_type) / sizeof (char *))
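
/*
 * The final entry is a catch-all; immu_qinv_report_fault() clamps the
 * fetched type code so that out-of-range values map to it.
 */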
/*
 * the queued invalidation interface functions
 */
static void qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc);
static void qinv_context_common(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, ctt_inv_g_t type);
static void qinv_iotlb_common(immu_t *immu, uint_t domain_id,
    uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type);
static void qinv_iec_common(immu_t *immu, uint_t iidx,
    uint_t im, uint_t g);
static void immu_qinv_inv_wait(immu_inv_wait_t *iwp);
static void qinv_wait_sync(immu_t *immu, immu_inv_wait_t *iwp);
/*LINTED*/
static void qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
    uint64_t addr, uint_t size, uint_t max_invs_pd);
/* submit invalidation request descriptor to invalidation queue */
static void
qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc)
{
	qinv_t *qinv;
	qinv_mem_t *qinv_table;
	uint_t tail;
#ifdef DEBUG
	uint_t count = 0;
#endif

	qinv = (qinv_t *)immu->immu_qinv;
	qinv_table = &(qinv->qinv_table);

	mutex_enter(&qinv_table->qinv_mem_lock);
	tail = qinv_table->qinv_mem_tail;
	qinv_table->qinv_mem_tail++;

	if (qinv_table->qinv_mem_tail == qinv_table->qinv_mem_size)
		qinv_table->qinv_mem_tail = 0;
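
	/*
	 * The tail was advanced above, so head == tail here means the
	 * queue is completely full, not empty; spin below until the
	 * hardware head moves past at least one descriptor.
	 */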
	while (qinv_table->qinv_mem_head == qinv_table->qinv_mem_tail) {
#ifdef DEBUG
		count++;
#endif
		/*
		 * inv queue table exhausted, wait for hardware to fetch
		 * the next descriptor
		 */
		qinv_table->qinv_mem_head = QINV_IQA_HEAD(
		    immu_regs_get64(immu, IMMU_REG_INVAL_QH));
	}

	IMMU_DPROBE3(immu__qinv__sub, uint64_t, dsc->lo, uint64_t, dsc->hi,
	    uint_t, count);

	bcopy(dsc, qinv_table->qinv_mem_vaddr + tail * QINV_ENTRY_SIZE,
	    QINV_ENTRY_SIZE);

	immu_regs_put64(immu, IMMU_REG_INVAL_QT,
	    qinv_table->qinv_mem_tail << QINV_IQA_TAIL_SHIFT);

	mutex_exit(&qinv_table->qinv_mem_lock);
}
/* queued invalidation interface -- invalidate context cache */
static void
qinv_context_common(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, ctt_inv_g_t type)
{
	qinv_dsc_t dsc;

	dsc.lo = CC_INV_DSC_LOW(function_mask, source_id, domain_id, type);
	dsc.hi = CC_INV_DSC_HIGH;

	qinv_submit_inv_dsc(immu, &dsc);
}
/* queued invalidation interface -- invalidate iotlb */
static void
qinv_iotlb_common(immu_t *immu, uint_t domain_id,
    uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type)
{
	qinv_dsc_t dsc;
	uint8_t dr = 0;
	uint8_t dw = 0;

	if (IMMU_CAP_GET_DRD(immu->immu_regs_cap))
		dr = 1;
	if (IMMU_CAP_GET_DWD(immu->immu_regs_cap))
		dw = 1;

	switch (type) {
	case TLB_INV_G_PAGE:
		if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap) ||
		    am > IMMU_CAP_GET_MAMV(immu->immu_regs_cap) ||
		    addr & IMMU_PAGEOFFSET) {
			type = TLB_INV_G_DOMAIN;
			goto qinv_ignore_psi;
		}
		dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
		dsc.hi = IOTLB_INV_DSC_HIGH(addr, hint, am);
		break;

	qinv_ignore_psi:
	case TLB_INV_G_DOMAIN:
		dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
		dsc.hi = 0;
		break;

	case TLB_INV_G_GLOBAL:
		dsc.lo = IOTLB_INV_DSC_LOW(0, dr, dw, type);
		dsc.hi = 0;
		break;
	default:
		ddi_err(DER_WARN, NULL, "incorrect iotlb flush type");
		return;
	}

	qinv_submit_inv_dsc(immu, &dsc);
}
/* queued invalidation interface -- invalidate dev_iotlb */
static void
qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
    uint64_t addr, uint_t size, uint_t max_invs_pd)
{
	qinv_dsc_t dsc;

	dsc.lo = DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd);
	dsc.hi = DEV_IOTLB_INV_DSC_HIGH(addr, size);

	qinv_submit_inv_dsc(immu, &dsc);
}
/* queued invalidation interface -- invalidate interrupt entry cache */
static void
qinv_iec_common(immu_t *immu, uint_t iidx, uint_t im, uint_t g)
{
	qinv_dsc_t dsc;

	dsc.lo = IEC_INV_DSC_LOW(iidx, im, g);
	dsc.hi = IEC_INV_DSC_HIGH;

	qinv_submit_inv_dsc(immu, &dsc);
}
/*
 * queued invalidation interface -- invalidation wait descriptor
 * wait until the invalidation request has finished
 */
static void
qinv_wait_sync(immu_t *immu, immu_inv_wait_t *iwp)
{
	qinv_dsc_t dsc;
	volatile uint32_t *status;
	uint64_t paddr;
#ifdef DEBUG
	uint_t count;
#endif

	status = &iwp->iwp_vstatus;
	paddr = iwp->iwp_pstatus;

	*status = IMMU_INV_DATA_PENDING;
	membar_producer();

	/*
	 * sdata = IMMU_INV_DATA_DONE, fence = 1, sw = 1, if = 0
	 * indicate the invalidation wait descriptor completion by
	 * performing a coherent DWORD write to the status address,
	 * not by generating an invalidation completion event
	 */
	dsc.lo = INV_WAIT_DSC_LOW(IMMU_INV_DATA_DONE, 1, 1, 0);
	dsc.hi = INV_WAIT_DSC_HIGH(paddr);

	qinv_submit_inv_dsc(immu, &dsc);

	if (iwp->iwp_sync) {
#ifdef DEBUG
		count = 0;
		while (*status != IMMU_INV_DATA_DONE) {
			count++;
			ht_pause();
		}
		DTRACE_PROBE2(immu__wait__sync, const char *, iwp->iwp_name,
		    uint_t, count);
#else
		while (*status != IMMU_INV_DATA_DONE)
			ht_pause();
#endif
	}
}
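
/*
 * Complete a wait started with iwp_sync == B_FALSE, for which
 * qinv_wait_sync() queued the wait descriptor but returned without
 * polling the status word.
 */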
static void
immu_qinv_inv_wait(immu_inv_wait_t *iwp)
{
	volatile uint32_t *status = &iwp->iwp_vstatus;
#ifdef DEBUG
	uint_t count;

	count = 0;
	while (*status != IMMU_INV_DATA_DONE) {
		count++;
		ht_pause();
	}
	DTRACE_PROBE2(immu__wait__async, const char *, iwp->iwp_name,
	    uint_t, count);
#else
	while (*status != IMMU_INV_DATA_DONE)
		ht_pause();
#endif
}
/*
 * call ddi_dma_mem_alloc to allocate physically contiguous
 * pages for the invalidation queue table
 */
static int
qinv_setup(immu_t *immu)
{
	qinv_t *qinv;
	size_t size;

	ddi_dma_attr_t qinv_dma_attr = {
		DMA_ATTR_V0,
		0U,
		0xffffffffffffffffULL,
		0xffffffffU,
		MMU_PAGESIZE, /* page aligned */
		0x1,
		0x1,
		0xffffffffU,
		0xffffffffffffffffULL,
		1,
		4,
		0
	};
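
	/*
	 * Fields above, in ddi_dma_attr_t order: version, low and high
	 * address limits, maximum DMA count, alignment (one page), burst
	 * sizes, minimum and maximum transfer sizes, segment boundary,
	 * scatter/gather list length (1: the queue must be physically
	 * contiguous), granularity and flags.
	 */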
	ddi_device_acc_attr_t qinv_acc_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC
	};
	mutex_init(&(immu->immu_qinv_lock), NULL, MUTEX_DRIVER, NULL);

	mutex_enter(&(immu->immu_qinv_lock));

	immu->immu_qinv = NULL;
	if (!IMMU_ECAP_GET_QI(immu->immu_regs_excap) ||
	    immu_qinv_enable == B_FALSE) {
		mutex_exit(&(immu->immu_qinv_lock));
		return (DDI_SUCCESS);
	}

	if (qinv_iqa_qs > QINV_MAX_QUEUE_SIZE)
		qinv_iqa_qs = QINV_MAX_QUEUE_SIZE;

	qinv = kmem_zalloc(sizeof (qinv_t), KM_SLEEP);

	if (ddi_dma_alloc_handle(root_devinfo,
	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
	    &(qinv->qinv_table.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue table handle failed");
		goto queue_table_handle_failed;
	}

	if (ddi_dma_alloc_handle(root_devinfo,
	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
	    &(qinv->qinv_sync.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue sync mem handle failed");
		goto sync_table_handle_failed;
	}

	qinv->qinv_table.qinv_mem_size = (1 << (qinv_iqa_qs + 8));
	size = qinv->qinv_table.qinv_mem_size * QINV_ENTRY_SIZE;

	/* alloc physically contiguous pages for invalidation queue */
	if (ddi_dma_mem_alloc(qinv->qinv_table.qinv_mem_dma_hdl,
	    size,
	    &qinv_acc_attr,
	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
	    DDI_DMA_SLEEP,
	    NULL,
	    &(qinv->qinv_table.qinv_mem_vaddr),
	    &size,
	    &(qinv->qinv_table.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue table failed");
		goto queue_table_mem_failed;
	}

	ASSERT(!((uintptr_t)qinv->qinv_table.qinv_mem_vaddr & MMU_PAGEOFFSET));
	bzero(qinv->qinv_table.qinv_mem_vaddr, size);

	/* get the base physical address of invalidation request queue */
	qinv->qinv_table.qinv_mem_paddr = pfn_to_pa(
	    hat_getpfnum(kas.a_hat, qinv->qinv_table.qinv_mem_vaddr));

	qinv->qinv_table.qinv_mem_head = qinv->qinv_table.qinv_mem_tail = 0;

	qinv->qinv_sync.qinv_mem_size = qinv->qinv_table.qinv_mem_size;
	size = qinv->qinv_sync.qinv_mem_size * QINV_SYNC_DATA_SIZE;

	/* alloc status memory for invalidation wait descriptor */
	if (ddi_dma_mem_alloc(qinv->qinv_sync.qinv_mem_dma_hdl,
	    size,
	    &qinv_acc_attr,
	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
	    DDI_DMA_SLEEP,
	    NULL,
	    &(qinv->qinv_sync.qinv_mem_vaddr),
	    &size,
	    &(qinv->qinv_sync.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue sync mem failed");
		goto sync_table_mem_failed;
	}

	ASSERT(!((uintptr_t)qinv->qinv_sync.qinv_mem_vaddr & MMU_PAGEOFFSET));
	bzero(qinv->qinv_sync.qinv_mem_vaddr, size);
	qinv->qinv_sync.qinv_mem_paddr = pfn_to_pa(
	    hat_getpfnum(kas.a_hat, qinv->qinv_sync.qinv_mem_vaddr));

	qinv->qinv_sync.qinv_mem_head = qinv->qinv_sync.qinv_mem_tail = 0;

	mutex_init(&(qinv->qinv_table.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);
	mutex_init(&(qinv->qinv_sync.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);

	immu->immu_qinv = qinv;

	mutex_exit(&(immu->immu_qinv_lock));

	return (DDI_SUCCESS);

sync_table_mem_failed:
	ddi_dma_mem_free(&(qinv->qinv_table.qinv_mem_acc_hdl));

queue_table_mem_failed:
	ddi_dma_free_handle(&(qinv->qinv_sync.qinv_mem_dma_hdl));

sync_table_handle_failed:
	ddi_dma_free_handle(&(qinv->qinv_table.qinv_mem_dma_hdl));

queue_table_handle_failed:
	kmem_free(qinv, sizeof (qinv_t));

	mutex_exit(&(immu->immu_qinv_lock));

	return (DDI_FAILURE);
}
/*
 * ###########################################################################
 *
 * Functions exported by immu_qinv.c
 *
 * ###########################################################################
 */
/*
 * initialize the invalidation request queue structure.
 */
int
immu_qinv_setup(list_t *listp)
{
	immu_t *immu;
	int nerr;

	if (immu_qinv_enable == B_FALSE) {
		return (DDI_FAILURE);
	}

	nerr = 0;
	immu = list_head(listp);
	for (; immu; immu = list_next(listp, immu)) {
		if (qinv_setup(immu) == DDI_SUCCESS) {
			immu->immu_qinv_setup = B_TRUE;
		} else {
			nerr++;
			break;
		}
	}

	return (nerr > 0 ? DDI_FAILURE : DDI_SUCCESS);
}
void
immu_qinv_startup(immu_t *immu)
{
	qinv_t *qinv;
	uint64_t qinv_reg_value;

	if (immu->immu_qinv_setup == B_FALSE) {
		return;
	}

	qinv = (qinv_t *)immu->immu_qinv;
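
	/*
	 * The queue base is page aligned, so its low bits are free to
	 * carry the QS (queue size) field; OR-ing qinv_iqa_qs into the
	 * base produces the value programmed into the IQA register.
	 */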
	qinv_reg_value = qinv->qinv_table.qinv_mem_paddr | qinv_iqa_qs;
	immu_regs_qinv_enable(immu, qinv_reg_value);
	immu->immu_flushops = &immu_qinv_flushops;
	immu->immu_qinv_running = B_TRUE;
}
/*
 * queued invalidation interface
 * function based context cache invalidation
 */
void
immu_qinv_context_fsi(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, immu_inv_wait_t *iwp)
{
	qinv_context_common(immu, function_mask, source_id,
	    domain_id, CTT_INV_G_DEVICE);
	qinv_wait_sync(immu, iwp);
}
/*
 * queued invalidation interface
 * domain based context cache invalidation
 */
void
immu_qinv_context_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
	qinv_context_common(immu, 0, 0, domain_id, CTT_INV_G_DOMAIN);
	qinv_wait_sync(immu, iwp);
}
/*
 * queued invalidation interface
 * global context cache invalidation
 */
void
immu_qinv_context_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
	qinv_context_common(immu, 0, 0, 0, CTT_INV_G_GLOBAL);
	qinv_wait_sync(immu, iwp);
}
/*
 * queued invalidation interface
 * page based iotlb invalidation
 */
void
immu_qinv_iotlb_psi(immu_t *immu, uint_t domain_id,
    uint64_t dvma, uint_t count, uint_t hint, immu_inv_wait_t *iwp)
{
	uint_t am = 0;
	uint_t max_am;

	max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);
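
	/*
	 * An address mask (am) of N covers a naturally aligned block of
	 * 2^N pages; e.g. a 4-page range starting on a 4-page-aligned
	 * dvma fits am = 2. The loop below picks the smallest mask that
	 * covers [dvma, dvma + count), falling back to a domain-wide
	 * flush when no mask up to max_am fits.
	 */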
	/* choose page-selective invalidation if supported */
	if (IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
		while (am <= max_am) {
			if ((ADDR_AM_OFFSET(IMMU_BTOP(dvma), am) + count)
			    <= ADDR_AM_MAX(am)) {
				qinv_iotlb_common(immu, domain_id,
				    dvma, am, hint, TLB_INV_G_PAGE);
				break;
			}
			am++;
		}
		if (am > max_am) {
			qinv_iotlb_common(immu, domain_id,
			    dvma, 0, hint, TLB_INV_G_DOMAIN);
		}

	/* otherwise fall back to domain invalidation */
	} else {
		qinv_iotlb_common(immu, domain_id, dvma,
		    0, hint, TLB_INV_G_DOMAIN);
	}

	qinv_wait_sync(immu, iwp);
}
/*
 * queued invalidation interface
 * domain based iotlb invalidation
 */
void
immu_qinv_iotlb_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
	qinv_iotlb_common(immu, domain_id, 0, 0, 0, TLB_INV_G_DOMAIN);
	qinv_wait_sync(immu, iwp);
}
/*
 * queued invalidation interface
 * global iotlb invalidation
 */
void
immu_qinv_iotlb_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
	qinv_iotlb_common(immu, 0, 0, 0, 0, TLB_INV_G_GLOBAL);
	qinv_wait_sync(immu, iwp);
}
/* queued invalidation interface -- global invalidate interrupt entry cache */
void
immu_qinv_intr_global(immu_t *immu, immu_inv_wait_t *iwp)
{
	qinv_iec_common(immu, 0, 0, IEC_INV_GLOBAL);
	qinv_wait_sync(immu, iwp);
}
/* queued invalidation interface -- invalidate single interrupt entry cache */
void
immu_qinv_intr_one_cache(immu_t *immu, uint_t iidx, immu_inv_wait_t *iwp)
{
	qinv_iec_common(immu, iidx, 0, IEC_INV_INDEX);
	qinv_wait_sync(immu, iwp);
}
/* queued invalidation interface -- invalidate interrupt entry caches */
void
immu_qinv_intr_caches(immu_t *immu, uint_t iidx, uint_t cnt,
    immu_inv_wait_t *iwp)
{
	uint_t i, mask = 0;

	ASSERT(cnt != 0);

	/* requested interrupt count is not a power of 2 */
	if (!ISP2(cnt)) {
		for (i = 0; i < cnt; i++) {
			qinv_iec_common(immu, iidx + i, 0, IEC_INV_INDEX);
		}
		qinv_wait_sync(immu, iwp);
		return;
	}

	while ((2 << mask) < cnt) {
		mask++;
	}

	if (mask > IMMU_ECAP_GET_MHMV(immu->immu_regs_excap)) {
		for (i = 0; i < cnt; i++) {
			qinv_iec_common(immu, iidx + i, 0, IEC_INV_INDEX);
		}
		qinv_wait_sync(immu, iwp);
		return;
	}

	qinv_iec_common(immu, iidx, mask, IEC_INV_INDEX);

	qinv_wait_sync(immu, iwp);
}
void
immu_qinv_report_fault(immu_t *immu)
{
	uint16_t head;
	qinv_dsc_t *dsc;
	qinv_t *qinv;

	/* access qinv data */
	mutex_enter(&(immu->immu_qinv_lock));

	qinv = (qinv_t *)(immu->immu_qinv);

	head = QINV_IQA_HEAD(
	    immu_regs_get64(immu, IMMU_REG_INVAL_QH));

	dsc = (qinv_dsc_t *)(qinv->qinv_table.qinv_mem_vaddr
	    + (head * QINV_ENTRY_SIZE));

	/* report the error */
	ddi_err(DER_WARN, immu->immu_dip,
	    "generated a fault when fetching a descriptor from the "
	    "invalidation queue, or the fetched descriptor is invalid. "
	    "head = 0x%x, type = %s",
	    head,
	    qinv_dsc_type[MIN(INV_DSC_TYPE(dsc), QINV_MAX_DSC_TYPE - 1)]);

	mutex_exit(&(immu->immu_qinv_lock));
}