/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 *   (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/i386/apic_internal.h"
#include "kvm/kvm_i386.h"
#include "migration/vmstate.h"
#include "trace.h"
/* context entry operations */
#define VTD_CE_GET_RID2PASID(ce) \
    ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK)
#define VTD_CE_GET_PASID_DIR_TABLE(ce) \
    ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK)

#define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT)
#define VTD_PE_GET_LEVEL(pe) (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW))

/*
 * PCI bus number (or SID) is not reliable since the device is usually
 * initialized before guest can configure the PCI bridge
 * (SECONDARY_BUS_NUMBER).
 */
struct vtd_as_key {
    PCIBus *bus;
    uint8_t devfn;
    uint32_t pasid;
};

/* bus/devfn is PCI device's real BDF not the aliased one */
struct vtd_hiod_key {
    PCIBus *bus;
    uint8_t devfn;
};

struct vtd_iotlb_key {
    uint64_t gfn;
    uint32_t pasid;
    uint16_t sid;
    uint8_t level;
};

static void vtd_address_space_refresh_all(IntelIOMMUState *s);
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n);
static void vtd_panic_require_caching_mode(void)
{
    error_report("We need to set caching-mode=on for intel-iommu to enable "
                 "device assignment with IOMMU protection.");
    exit(1);
}

static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
                            uint64_t wmask, uint64_t w1cmask)
{
    stq_le_p(&s->csr[addr], val);
    stq_le_p(&s->wmask[addr], wmask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
{
    stq_le_p(&s->womask[addr], mask);
}

static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
                            uint32_t wmask, uint32_t w1cmask)
{
    stl_le_p(&s->csr[addr], val);
    stl_le_p(&s->wmask[addr], wmask);
    stl_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
{
    stl_le_p(&s->womask[addr], mask);
}
/* "External" get/set operations */
static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    uint64_t oldval = ldq_le_p(&s->csr[addr]);
    uint64_t wmask = ldq_le_p(&s->wmask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    stq_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}

static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
{
    uint32_t oldval = ldl_le_p(&s->csr[addr]);
    uint32_t wmask = ldl_le_p(&s->wmask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    stl_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}

static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
{
    uint64_t val = ldq_le_p(&s->csr[addr]);
    uint64_t womask = ldq_le_p(&s->womask[addr]);
    return val & ~womask;
}

static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
{
    uint32_t val = ldl_le_p(&s->csr[addr]);
    uint32_t womask = ldl_le_p(&s->womask[addr]);
    return val & ~womask;
}
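
/*
 * Summary of the register helpers above (derived from the code itself):
 * 'wmask' holds the software-writable bits of a register, 'w1cmask' the
 * write-1-to-clear bits and 'womask' the write-only bits.  vtd_set_quad()
 * and vtd_set_long() merge the written value through wmask and then clear
 * any w1c bits that the written value sets, while vtd_get_quad() and
 * vtd_get_long() hide the write-only bits from reads.
 */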
/* "Internal" get/set operations */
static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldq_le_p(&s->csr[addr]);
}

static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldl_le_p(&s->csr[addr]);
}

static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->csr[addr], val);
}

static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
                                        uint32_t clear, uint32_t mask)
{
    uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
    stl_le_p(&s->csr[addr], new_val);
    return new_val;
}

static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
                                        uint64_t clear, uint64_t mask)
{
    uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
    stq_le_p(&s->csr[addr], new_val);
    return new_val;
}

static inline void vtd_iommu_lock(IntelIOMMUState *s)
{
    qemu_mutex_lock(&s->iommu_lock);
}

static inline void vtd_iommu_unlock(IntelIOMMUState *s)
{
    qemu_mutex_unlock(&s->iommu_lock);
}
static void vtd_update_scalable_state(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_RTADDR_REG);

    if (s->scalable_mode) {
        s->root_scalable = val & VTD_RTADDR_SMT;
    }
}

static void vtd_update_iq_dw(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_IQA_REG);

    if (s->ecap & VTD_ECAP_SMTS &&
        val & VTD_IQA_DW_MASK) {
        s->iq_dw = true;
    } else {
        s->iq_dw = false;
    }
}

/* Whether the address space needs to notify new mappings */
static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as)
{
    return as->notifier_flags & IOMMU_NOTIFIER_MAP;
}
/* GHashTable functions */
static gboolean vtd_iotlb_equal(gconstpointer v1, gconstpointer v2)
{
    const struct vtd_iotlb_key *key1 = v1;
    const struct vtd_iotlb_key *key2 = v2;

    return key1->sid == key2->sid &&
           key1->pasid == key2->pasid &&
           key1->level == key2->level &&
           key1->gfn == key2->gfn;
}

static guint vtd_iotlb_hash(gconstpointer v)
{
    const struct vtd_iotlb_key *key = v;
    uint64_t hash64 = key->gfn | ((uint64_t)(key->sid) << VTD_IOTLB_SID_SHIFT) |
        (uint64_t)(key->level - 1) << VTD_IOTLB_LVL_SHIFT |
        (uint64_t)(key->pasid) << VTD_IOTLB_PASID_SHIFT;

    return (guint)((hash64 >> 32) ^ (hash64 & 0xffffffffU));
}

static gboolean vtd_as_equal(gconstpointer v1, gconstpointer v2)
{
    const struct vtd_as_key *key1 = v1;
    const struct vtd_as_key *key2 = v2;

    return (key1->bus == key2->bus) && (key1->devfn == key2->devfn) &&
           (key1->pasid == key2->pasid);
}

/*
 * Note that we use pointer to PCIBus as the key, so hashing/shifting
 * based on the pointer value is intended. Note that we deal with
 * collisions through vtd_as_equal().
 */
static guint vtd_as_hash(gconstpointer v)
{
    const struct vtd_as_key *key = v;
    guint value = (guint)(uintptr_t)key->bus;

    return (guint)(value << 8 | key->devfn);
}

/* Same implementation as vtd_as_hash() */
static guint vtd_hiod_hash(gconstpointer v)
{
    return vtd_as_hash(v);
}

static gboolean vtd_hiod_equal(gconstpointer v1, gconstpointer v2)
{
    const struct vtd_hiod_key *key1 = v1;
    const struct vtd_hiod_key *key2 = v2;

    return (key1->bus == key2->bus) && (key1->devfn == key2->devfn);
}

static void vtd_hiod_destroy(gpointer v)
{
    object_unref(v);
}

static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
                                          gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    uint16_t domain_id = *(uint16_t *)user_data;
    return entry->domain_id == domain_id;
}
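
/*
 * Note on the hash tables above: IOTLB entries are keyed by
 * (gfn, sid, pasid, level) through vtd_iotlb_hash()/vtd_iotlb_equal(),
 * while address spaces are keyed by (bus pointer, devfn, pasid) through
 * vtd_as_hash()/vtd_as_equal(); collisions are always resolved by the
 * corresponding equality callback.
 */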
/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
    assert(level != 0);
    return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}

static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
{
    return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
}

static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
                                        gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
    uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
    uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
    return (entry->domain_id == info->domain_id) &&
            (((entry->gfn & info->mask) == gfn) ||
             (entry->gfn == gfn_tlb));
}

/* Reset all the gen of VTDAddressSpace to zero and set the gen of
 * IntelIOMMUState to 1. Must be called with IOMMU lock held.
 */
static void vtd_reset_context_cache_locked(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    GHashTableIter as_it;

    trace_vtd_context_cache_reset();

    g_hash_table_iter_init(&as_it, s->vtd_address_spaces);

    while (g_hash_table_iter_next(&as_it, NULL, (void **)&vtd_as)) {
        vtd_as->context_cache_entry.context_cache_gen = 0;
    }
    s->context_cache_gen = 1;
}

/* Must be called with IOMMU lock held. */
static void vtd_reset_iotlb_locked(IntelIOMMUState *s)
{
    g_hash_table_remove_all(s->iotlb);
}

static void vtd_reset_iotlb(IntelIOMMUState *s)
{
    vtd_iommu_lock(s);
    vtd_reset_iotlb_locked(s);
    vtd_iommu_unlock(s);
}

static void vtd_reset_caches(IntelIOMMUState *s)
{
    vtd_iommu_lock(s);
    vtd_reset_iotlb_locked(s);
    vtd_reset_context_cache_locked(s);
    vtd_iommu_unlock(s);
}

static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
{
    return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
}
/* Must be called with IOMMU lock held */
static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                       uint32_t pasid, hwaddr addr)
{
    struct vtd_iotlb_key key;
    VTDIOTLBEntry *entry;
    unsigned level;

    for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
        key.gfn = vtd_get_iotlb_gfn(addr, level);
        key.level = level;
        key.sid = source_id;
        key.pasid = pasid;
        entry = g_hash_table_lookup(s->iotlb, &key);
        if (entry) {
            break;
        }
    }

    return entry;
}

/* Must be with IOMMU lock held */
static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                             uint16_t domain_id, hwaddr addr, uint64_t slpte,
                             uint8_t access_flags, uint32_t level,
                             uint32_t pasid)
{
    VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
    struct vtd_iotlb_key *key = g_malloc(sizeof(*key));
    uint64_t gfn = vtd_get_iotlb_gfn(addr, level);

    trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
    if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
        trace_vtd_iotlb_reset("iotlb exceeds size limit");
        vtd_reset_iotlb_locked(s);
    }

    entry->gfn = gfn;
    entry->domain_id = domain_id;
    entry->slpte = slpte;
    entry->access_flags = access_flags;
    entry->mask = vtd_slpt_level_page_mask(level);
    entry->pasid = pasid;

    key->gfn = gfn;
    key->sid = source_id;
    key->level = level;
    key->pasid = pasid;

    g_hash_table_replace(s->iotlb, key, entry);
}
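
/*
 * Note that the emulated IOTLB above does not evict single entries: once
 * g_hash_table_size() reaches VTD_IOTLB_MAX_SIZE the whole table is reset
 * and is then repopulated lazily by later translations.
 */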
/* Given the reg addr of both the message data and address, generate an
 * interrupt via MSI.
 */
static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
                                   hwaddr mesg_data_reg)
{
    MSIMessage msi;

    assert(mesg_data_reg < DMAR_REG_SIZE);
    assert(mesg_addr_reg < DMAR_REG_SIZE);

    msi.address = vtd_get_long_raw(s, mesg_addr_reg);
    msi.data = vtd_get_long_raw(s, mesg_data_reg);

    trace_vtd_irq_generate(msi.address, msi.data);

    apic_get_class(NULL)->send_msi(&msi);
}

/* Generate a fault event to software via MSI if conditions are met.
 * Notice that the value of FSTS_REG being passed to it should be the one
 * before any update.
 */
static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
{
    if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
        pre_fsts & VTD_FSTS_IQE) {
        error_report_once("There are previous interrupt conditions "
                          "to be serviced by software, fault event "
                          "is not generated");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
    if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
        error_report_once("Interrupt Mask set, irq is not generated");
    } else {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}
/* Check if the Fault (F) field of the Fault Recording Register referenced by
 * @index is Set.
 */
static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
}

/* Update the PPF field of Fault Status Register.
 * Should be called whenever change the F field of any fault recording
 * registers.
 */
static void vtd_update_fsts_ppf(IntelIOMMUState *s)
{
    uint32_t i;
    uint32_t ppf_mask = 0;

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        if (vtd_is_frcd_set(s, i)) {
            ppf_mask = VTD_FSTS_PPF;
            break;
        }
    }
    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
    trace_vtd_fsts_ppf(!!ppf_mask);
}

static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
    vtd_update_fsts_ppf(s);
}

/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
                            uint64_t hi, uint64_t lo)
{
    hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);

    assert(index < DMAR_FRCD_REG_NR);

    vtd_set_quad_raw(s, frcd_reg_addr, lo);
    vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);

    trace_vtd_frr_new(index, hi, lo);
}
/* Try to collapse multiple pending faults from the same requester */
static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
{
    uint32_t i;
    uint64_t frcd_reg;
    hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        frcd_reg = vtd_get_quad_raw(s, addr);
        if ((frcd_reg & VTD_FRCD_F) &&
            ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
            return true;
        }
        addr += 16; /* 128-bit for each */
    }
    return false;
}

/* Log and report an DMAR (address translation) fault to software */
static void vtd_report_frcd_fault(IntelIOMMUState *s, uint64_t source_id,
                                  uint64_t hi, uint64_t lo)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    if (fsts_reg & VTD_FSTS_PFO) {
        error_report_once("New fault is not recorded due to "
                          "Primary Fault Overflow");
        return;
    }

    if (vtd_try_collapse_fault(s, source_id)) {
        error_report_once("New fault is not recorded due to "
                          "compression of faults");
        return;
    }

    if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
        error_report_once("Next Fault Recording Reg is used, "
                          "new fault is not recorded, set PFO field");
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
        return;
    }

    vtd_record_frcd(s, s->next_frcd_reg, hi, lo);

    if (fsts_reg & VTD_FSTS_PPF) {
        error_report_once("There are pending faults already, "
                          "fault event is not generated");
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
    } else {
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
                                VTD_FSTS_FRI(s->next_frcd_reg));
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
        /* This case actually cause the PPF to be Set.
         * So generate fault event (interrupt).
         */
        vtd_generate_fault_event(s, fsts_reg);
    }
}

/* Log and report an DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
                                  hwaddr addr, VTDFaultReason fault,
                                  bool is_write, bool is_pasid,
                                  uint32_t pasid)
{
    uint64_t hi, lo;

    assert(fault < VTD_FR_MAX);

    trace_vtd_dmar_fault(source_id, fault, addr, is_write);

    lo = VTD_FRCD_FI(addr);
    hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault) |
         VTD_FRCD_PV(pasid) | VTD_FRCD_PP(is_pasid);
    if (!is_write) {
        hi |= VTD_FRCD_T;
    }

    vtd_report_frcd_fault(s, source_id, hi, lo);
}

static void vtd_report_ir_fault(IntelIOMMUState *s, uint64_t source_id,
                                VTDFaultReason fault, uint16_t index)
{
    uint64_t hi, lo;

    lo = VTD_FRCD_IR_IDX(index);
    hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);

    vtd_report_frcd_fault(s, source_id, hi, lo);
}
/* Handle Invalidation Queue Errors of queued invalidation interface error
 * conditions.
 */
static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
    vtd_generate_fault_event(s, fsts_reg);
}

/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
    if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
        trace_vtd_inv_desc_wait_irq("One pending, skip current");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
    vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
    if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
        trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
                                    "new event not generated");
    } else {
        /* Generate the interrupt event */
        trace_vtd_inv_desc_wait_irq("Generating complete event");
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}
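
/*
 * Both fault events and invalidation completion events above are delivered
 * through vtd_generate_interrupt(); they only differ in which address/data
 * register pair (DMAR_FEADDR/FEDATA vs DMAR_IEADDR/IEDATA) and which
 * interrupt-pending/mask bits are used.
 */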
static inline bool vtd_root_entry_present(IntelIOMMUState *s,
                                          VTDRootEntry *re,
                                          uint8_t devfn)
{
    if (s->root_scalable && devfn > UINT8_MAX / 2) {
        return re->hi & VTD_ROOT_ENTRY_P;
    }

    return re->lo & VTD_ROOT_ENTRY_P;
}

static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
                              VTDRootEntry *re)
{
    dma_addr_t addr;

    addr = s->root + index * sizeof(*re);
    if (dma_memory_read(&address_space_memory, addr,
                        re, sizeof(*re), MEMTXATTRS_UNSPECIFIED)) {
        return -VTD_FR_ROOT_TABLE_INV;
    }
    re->lo = le64_to_cpu(re->lo);
    re->hi = le64_to_cpu(re->hi);
    return 0;
}

static inline bool vtd_ce_present(VTDContextEntry *context)
{
    return context->lo & VTD_CONTEXT_ENTRY_P;
}

static int vtd_get_context_entry_from_root(IntelIOMMUState *s,
                                            VTDRootEntry *re,
                                            uint8_t index,
                                            VTDContextEntry *ce)
{
    dma_addr_t addr, ce_size;

    /* we have checked that root entry is present */
    ce_size = s->root_scalable ? VTD_CTX_ENTRY_SCALABLE_SIZE :
              VTD_CTX_ENTRY_LEGACY_SIZE;

    if (s->root_scalable && index > UINT8_MAX / 2) {
        index = index & (~VTD_DEVFN_CHECK_MASK);
        addr = re->hi & VTD_ROOT_ENTRY_CTP;
    } else {
        addr = re->lo & VTD_ROOT_ENTRY_CTP;
    }

    addr = addr + index * ce_size;
    if (dma_memory_read(&address_space_memory, addr,
                        ce, ce_size, MEMTXATTRS_UNSPECIFIED)) {
        return -VTD_FR_CONTEXT_TABLE_INV;
    }

    ce->lo = le64_to_cpu(ce->lo);
    ce->hi = le64_to_cpu(ce->hi);
    if (ce_size == VTD_CTX_ENTRY_SCALABLE_SIZE) {
        ce->val[2] = le64_to_cpu(ce->val[2]);
        ce->val[3] = le64_to_cpu(ce->val[3]);
    }
    return 0;
}

static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}
static inline uint64_t vtd_get_slpte_addr(uint64_t slpte, uint8_t aw)
{
    return slpte & VTD_SL_PT_BASE_ADDR_MASK(aw);
}

/* Whether the pte indicates the address of the page frame */
static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
{
    return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
}

/* Get the content of a spte located in @base_addr[@index] */
static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
{
    uint64_t slpte;

    assert(index < VTD_SL_PT_ENTRY_NR);

    if (dma_memory_read(&address_space_memory,
                        base_addr + index * sizeof(slpte),
                        &slpte, sizeof(slpte), MEMTXATTRS_UNSPECIFIED)) {
        slpte = (uint64_t)-1;
        return slpte;
    }
    slpte = le64_to_cpu(slpte);
    return slpte;
}

/* Given an iova and the level of paging structure, return the offset
 * of current level.
 */
static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
    return (iova >> vtd_slpt_level_shift(level)) &
            ((1ULL << VTD_SL_LEVEL_BITS) - 1);
}

/* Check Capability Register to see if the @level of page-table is supported */
static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
{
    return VTD_CAP_SAGAW_MASK & s->cap &
           (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
}
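
/*
 * Example of the level arithmetic used above (assuming the usual 4K page
 * shift of 12 and 9 bits per level): vtd_slpt_level_shift() yields 12 for
 * level 1 (4K pages), 21 for level 2 (2M), 30 for level 3 (1G) and 39 for
 * level 4, and vtd_iova_level_offset() extracts the corresponding 9-bit
 * table index from an IOVA.
 */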
/* Return true if check passed, otherwise false */
static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu,
                                     VTDPASIDEntry *pe)
{
    switch (VTD_PE_GET_TYPE(pe)) {
    case VTD_SM_PASID_ENTRY_FLT:
    case VTD_SM_PASID_ENTRY_SLT:
    case VTD_SM_PASID_ENTRY_NESTED:
        break;
    case VTD_SM_PASID_ENTRY_PT:
        if (!x86_iommu->pt_supported) {
            return false;
        }
        break;
    default:
        /* Unknown type */
        return false;
    }
    return true;
}

static inline bool vtd_pdire_present(VTDPASIDDirEntry *pdire)
{
    return pdire->val & 1;
}

/*
 * Caller of this function should check present bit if wants
 * to use pdir entry for further usage except for fpd bit check.
 */
static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base,
                                         uint32_t pasid,
                                         VTDPASIDDirEntry *pdire)
{
    uint32_t index;
    dma_addr_t addr, entry_size;

    index = VTD_PASID_DIR_INDEX(pasid);
    entry_size = VTD_PASID_DIR_ENTRY_SIZE;
    addr = pasid_dir_base + index * entry_size;
    if (dma_memory_read(&address_space_memory, addr,
                        pdire, entry_size, MEMTXATTRS_UNSPECIFIED)) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    pdire->val = le64_to_cpu(pdire->val);

    return 0;
}

static inline bool vtd_pe_present(VTDPASIDEntry *pe)
{
    return pe->val[0] & VTD_PASID_ENTRY_P;
}
static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s,
                                          uint32_t pasid,
                                          dma_addr_t addr,
                                          VTDPASIDEntry *pe)
{
    uint32_t index;
    dma_addr_t entry_size;
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    index = VTD_PASID_TABLE_INDEX(pasid);
    entry_size = VTD_PASID_ENTRY_SIZE;
    addr = addr + index * entry_size;
    if (dma_memory_read(&address_space_memory, addr,
                        pe, entry_size, MEMTXATTRS_UNSPECIFIED)) {
        return -VTD_FR_PASID_TABLE_INV;
    }
    for (size_t i = 0; i < ARRAY_SIZE(pe->val); i++) {
        pe->val[i] = le64_to_cpu(pe->val[i]);
    }

    /* Do translation type check */
    if (!vtd_pe_type_check(x86_iommu, pe)) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    if (!vtd_is_level_supported(s, VTD_PE_GET_LEVEL(pe))) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    return 0;
}

/*
 * Caller of this function should check present bit if wants
 * to use pasid entry for further usage except for fpd bit check.
 */
static int vtd_get_pe_from_pdire(IntelIOMMUState *s,
                                 uint32_t pasid,
                                 VTDPASIDDirEntry *pdire,
                                 VTDPASIDEntry *pe)
{
    dma_addr_t addr = pdire->val & VTD_PASID_TABLE_BASE_ADDR_MASK;

    return vtd_get_pe_in_pasid_leaf_table(s, pasid, addr, pe);
}

/*
 * This function gets a pasid entry from a specified pasid
 * table (includes dir and leaf table) with a specified pasid.
 * Sanity check should be done to ensure return a present
 * pasid entry to caller.
 */
static int vtd_get_pe_from_pasid_table(IntelIOMMUState *s,
                                       dma_addr_t pasid_dir_base,
                                       uint32_t pasid,
                                       VTDPASIDEntry *pe)
{
    int ret;
    VTDPASIDDirEntry pdire;

    ret = vtd_get_pdire_from_pdir_table(pasid_dir_base,
                                        pasid, &pdire);
    if (ret) {
        return ret;
    }

    if (!vtd_pdire_present(&pdire)) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    ret = vtd_get_pe_from_pdire(s, pasid, &pdire, pe);
    if (ret) {
        return ret;
    }

    if (!vtd_pe_present(pe)) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    return 0;
}

static int vtd_ce_get_rid2pasid_entry(IntelIOMMUState *s,
                                      VTDContextEntry *ce,
                                      VTDPASIDEntry *pe,
                                      uint32_t pasid)
{
    dma_addr_t pasid_dir_base;
    int ret = 0;

    if (pasid == PCI_NO_PASID) {
        pasid = VTD_CE_GET_RID2PASID(ce);
    }
    pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce);
    ret = vtd_get_pe_from_pasid_table(s, pasid_dir_base, pasid, pe);

    return ret;
}

static int vtd_ce_get_pasid_fpd(IntelIOMMUState *s,
                                VTDContextEntry *ce,
                                bool *pe_fpd_set,
                                uint32_t pasid)
{
    int ret;
    dma_addr_t pasid_dir_base;
    VTDPASIDDirEntry pdire;
    VTDPASIDEntry pe;

    if (pasid == PCI_NO_PASID) {
        pasid = VTD_CE_GET_RID2PASID(ce);
    }
    pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce);

    /*
     * No present bit check since fpd is meaningful even
     * if the present bit is clear.
     */
    ret = vtd_get_pdire_from_pdir_table(pasid_dir_base, pasid, &pdire);
    if (ret) {
        return ret;
    }

    if (pdire.val & VTD_PASID_DIR_FPD) {
        *pe_fpd_set = true;
        return 0;
    }

    if (!vtd_pdire_present(&pdire)) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    /*
     * No present bit check since fpd is meaningful even
     * if the present bit is clear.
     */
    ret = vtd_get_pe_from_pdire(s, pasid, &pdire, &pe);
    if (ret) {
        return ret;
    }

    if (pe.val[0] & VTD_PASID_ENTRY_FPD) {
        *pe_fpd_set = true;
    }

    return 0;
}
/* Get the page-table level that hardware should use for the second-level
 * page-table walk from the Address Width field of context-entry.
 */
static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce)
{
    return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
}

static uint32_t vtd_get_iova_level(IntelIOMMUState *s,
                                   VTDContextEntry *ce,
                                   uint32_t pasid)
{
    VTDPASIDEntry pe;

    if (s->root_scalable) {
        vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid);
        return VTD_PE_GET_LEVEL(&pe);
    }

    return vtd_ce_get_level(ce);
}

static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce)
{
    return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
}

static uint32_t vtd_get_iova_agaw(IntelIOMMUState *s,
                                  VTDContextEntry *ce,
                                  uint32_t pasid)
{
    VTDPASIDEntry pe;

    if (s->root_scalable) {
        vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid);
        return 30 + ((pe.val[0] >> 2) & VTD_SM_PASID_ENTRY_AW) * 9;
    }

    return vtd_ce_get_agaw(ce);
}

static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_TT;
}

/* Only for Legacy Mode. Return true if check passed, otherwise false */
static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
                                     VTDContextEntry *ce)
{
    switch (vtd_ce_get_type(ce)) {
    case VTD_CONTEXT_TT_MULTI_LEVEL:
        /* Always supported */
        break;
    case VTD_CONTEXT_TT_DEV_IOTLB:
        if (!x86_iommu->dt_supported) {
            error_report_once("%s: DT specified but not supported", __func__);
            return false;
        }
        break;
    case VTD_CONTEXT_TT_PASS_THROUGH:
        if (!x86_iommu->pt_supported) {
            error_report_once("%s: PT specified but not supported", __func__);
            return false;
        }
        break;
    default:
        /* Unknown type */
        error_report_once("%s: unknown ce type: %"PRIu32, __func__,
                          vtd_ce_get_type(ce));
        return false;
    }
    return true;
}

static inline uint64_t vtd_iova_limit(IntelIOMMUState *s,
                                      VTDContextEntry *ce, uint8_t aw,
                                      uint32_t pasid)
{
    uint32_t ce_agaw = vtd_get_iova_agaw(s, ce, pasid);
    return 1ULL << MIN(ce_agaw, aw);
}

/* Return true if IOVA passes range check, otherwise false. */
static inline bool vtd_iova_range_check(IntelIOMMUState *s,
                                        uint64_t iova, VTDContextEntry *ce,
                                        uint8_t aw, uint32_t pasid)
{
    /*
     * Check if @iova is above 2^X-1, where X is the minimum of MGAW
     * in CAP_REG and AW in context-entry.
     */
    return !(iova & ~(vtd_iova_limit(s, ce, aw, pasid) - 1));
}

static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s,
                                          VTDContextEntry *ce,
                                          uint32_t pasid)
{
    VTDPASIDEntry pe;

    if (s->root_scalable) {
        vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid);
        return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR;
    }

    return vtd_ce_get_slpt_base(ce);
}
/*
 * Rsvd field masks for spte:
 *     vtd_spte_rsvd        4k pages
 *     vtd_spte_rsvd_large  large pages
 *
 * We support only 3-level and 4-level page tables (see vtd_init() which
 * sets only VTD_CAP_SAGAW_39bit and maybe VTD_CAP_SAGAW_48bit bits in s->cap).
 */
#define VTD_SPTE_RSVD_LEN 5
static uint64_t vtd_spte_rsvd[VTD_SPTE_RSVD_LEN];
static uint64_t vtd_spte_rsvd_large[VTD_SPTE_RSVD_LEN];

static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
{
    uint64_t rsvd_mask;

    /*
     * We should have caught a guest-mis-programmed level earlier,
     * via vtd_is_level_supported.
     */
    assert(level < VTD_SPTE_RSVD_LEN);
    /*
     * Zero level doesn't exist. The smallest level is VTD_SL_PT_LEVEL=1 and
     * checked by vtd_is_last_slpte().
     */
    assert(level);

    if ((level == VTD_SL_PD_LEVEL || level == VTD_SL_PDP_LEVEL) &&
        (slpte & VTD_SL_PT_PAGE_SIZE_MASK)) {
        /* large page */
        rsvd_mask = vtd_spte_rsvd_large[level];
    } else {
        rsvd_mask = vtd_spte_rsvd[level];
    }

    return slpte & rsvd_mask;
}
/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
 * of the translation, can be used for deciding the size of large page.
 */
static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce,
                             uint64_t iova, bool is_write,
                             uint64_t *slptep, uint32_t *slpte_level,
                             bool *reads, bool *writes, uint8_t aw_bits,
                             uint32_t pasid)
{
    dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid);
    uint32_t level = vtd_get_iova_level(s, ce, pasid);
    uint32_t offset;
    uint64_t slpte;
    uint64_t access_right_check;
    uint64_t xlat, size;

    if (!vtd_iova_range_check(s, iova, ce, aw_bits, pasid)) {
        error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ","
                          "pasid=0x%" PRIx32 ")", __func__, iova, pasid);
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    /* FIXME: what is the Atomics request here? */
    access_right_check = is_write ? VTD_SL_W : VTD_SL_R;

    while (true) {
        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            error_report_once("%s: detected read error on DMAR slpte "
                              "(iova=0x%" PRIx64 ", pasid=0x%" PRIx32 ")",
                              __func__, iova, pasid);
            if (level == vtd_get_iova_level(s, ce, pasid)) {
                /* Invalid programming of context-entry */
                return -VTD_FR_CONTEXT_ENTRY_INV;
            } else {
                return -VTD_FR_PAGING_ENTRY_INV;
            }
        }
        *reads = (*reads) && (slpte & VTD_SL_R);
        *writes = (*writes) && (slpte & VTD_SL_W);
        if (!(slpte & access_right_check)) {
            error_report_once("%s: detected slpte permission error "
                              "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
                              "slpte=0x%" PRIx64 ", write=%d, pasid=0x%"
                              PRIx32 ")", __func__, iova, level,
                              slpte, is_write, pasid);
            return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
        }
        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            error_report_once("%s: detected splte reserve non-zero "
                              "iova=0x%" PRIx64 ", level=0x%" PRIx32
                              "slpte=0x%" PRIx64 ", pasid=0x%" PRIX32 ")",
                              __func__, iova, level, slpte, pasid);
            return -VTD_FR_PAGING_ENTRY_RSVD;
        }

        if (vtd_is_last_slpte(slpte, level)) {
            *slptep = slpte;
            *slpte_level = level;
            break;
        }
        addr = vtd_get_slpte_addr(slpte, aw_bits);
        level--;
    }

    xlat = vtd_get_slpte_addr(*slptep, aw_bits);
    size = ~vtd_slpt_level_page_mask(level) + 1;

    /*
     * From VT-d spec 3.14: Untranslated requests and translation
     * requests that result in an address in the interrupt range will be
     * blocked with condition code LGN.4 or SGN.8.
     */
    if ((xlat > VTD_INTERRUPT_ADDR_LAST ||
         xlat + size - 1 < VTD_INTERRUPT_ADDR_FIRST)) {
        return 0;
    } else {
        error_report_once("%s: xlat address is in interrupt range "
                          "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
                          "slpte=0x%" PRIx64 ", write=%d, "
                          "xlat=0x%" PRIx64 ", size=0x%" PRIx64 ", "
                          "pasid=0x%" PRIx32 ")",
                          __func__, iova, level, slpte, is_write,
                          xlat, size, pasid);
        return s->scalable_mode ? -VTD_FR_SM_INTERRUPT_ADDR :
                                  -VTD_FR_INTERRUPT_ADDR;
    }
}
typedef int (*vtd_page_walk_hook)(const IOMMUTLBEvent *event, void *private);

/**
 * Constant information used during page walking
 *
 * @hook_fn: hook func to be called when detected page
 * @private: private data to be passed into hook func
 * @notify_unmap: whether we should notify invalid entries
 * @as: VT-d address space of the device
 * @aw: maximum address width
 * @domain: domain ID of the page walk
 */
typedef struct {
    VTDAddressSpace *as;
    vtd_page_walk_hook hook_fn;
    void *private;
    bool notify_unmap;
    uint8_t aw;
    uint16_t domain_id;
} vtd_page_walk_info;
static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info)
{
    VTDAddressSpace *as = info->as;
    vtd_page_walk_hook hook_fn = info->hook_fn;
    void *private = info->private;
    IOMMUTLBEntry *entry = &event->entry;
    DMAMap target = {
        .iova = entry->iova,
        .size = entry->addr_mask,
        .translated_addr = entry->translated_addr,
        .perm = entry->perm,
    };
    const DMAMap *mapped = iova_tree_find(as->iova_tree, &target);

    if (event->type == IOMMU_NOTIFIER_UNMAP && !info->notify_unmap) {
        trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask);
        return 0;
    }

    /* Update local IOVA mapped ranges */
    if (event->type == IOMMU_NOTIFIER_MAP) {
        if (mapped) {
            /* If it's exactly the same translation, skip */
            if (!memcmp(mapped, &target, sizeof(target))) {
                trace_vtd_page_walk_one_skip_map(entry->iova, entry->addr_mask,
                                                 entry->translated_addr);
                return 0;
            } else {
                /*
                 * Translation changed. Normally this should not
                 * happen, but it can happen when with buggy guest
                 * OSes. Note that there will be a small window that
                 * we don't have map at all. But that's the best
                 * effort we can do. The ideal way to emulate this is
                 * atomically modify the PTE to follow what has
                 * changed, but we can't. One example is that vfio
                 * driver only has VFIO_IOMMU_[UN]MAP_DMA but no
                 * interface to modify a mapping (meanwhile it seems
                 * meaningless to even provide one). Anyway, let's
                 * mark this as a TODO in case one day we'll have
                 * a better solution.
                 */
                IOMMUAccessFlags cache_perm = entry->perm;
                int ret;

                /* Emulate an UNMAP */
                event->type = IOMMU_NOTIFIER_UNMAP;
                entry->perm = IOMMU_NONE;
                trace_vtd_page_walk_one(info->domain_id,
                                        entry->iova,
                                        entry->translated_addr,
                                        entry->addr_mask,
                                        entry->perm);
                ret = hook_fn(event, private);
                if (ret) {
                    return ret;
                }
                /* Drop any existing mapping */
                iova_tree_remove(as->iova_tree, target);
                /* Recover the correct type */
                event->type = IOMMU_NOTIFIER_MAP;
                entry->perm = cache_perm;
            }
        }
        iova_tree_insert(as->iova_tree, &target);
    } else {
        if (!mapped) {
            /* Skip since we didn't map this range at all */
            trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask);
            return 0;
        }
        iova_tree_remove(as->iova_tree, target);
    }

    trace_vtd_page_walk_one(info->domain_id, entry->iova,
                            entry->translated_addr, entry->addr_mask,
                            entry->perm);
    return hook_fn(event, private);
}
/**
 * vtd_page_walk_level - walk over specific level for IOVA range
 *
 * @addr: base GPA addr to start the walk
 * @start: IOVA range start address
 * @end: IOVA range end address (start <= addr < end)
 * @read: whether parent level has read permission
 * @write: whether parent level has write permission
 * @info: constant information for the page walk
 */
static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
                               uint64_t end, uint32_t level, bool read,
                               bool write, vtd_page_walk_info *info)
{
    bool read_cur, write_cur, entry_valid;
    uint32_t offset;
    uint64_t slpte;
    uint64_t subpage_size, subpage_mask;
    IOMMUTLBEvent event;
    uint64_t iova = start;
    uint64_t iova_next;
    int ret = 0;

    trace_vtd_page_walk_level(addr, level, start, end);

    subpage_size = 1ULL << vtd_slpt_level_shift(level);
    subpage_mask = vtd_slpt_level_page_mask(level);

    while (iova < end) {
        iova_next = (iova & subpage_mask) + subpage_size;

        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_page_walk_skip_read(iova, iova_next);
            goto next;
        }

        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_page_walk_skip_reserve(iova, iova_next);
            goto next;
        }

        /* Permissions are stacked with parents' */
        read_cur = read && (slpte & VTD_SL_R);
        write_cur = write && (slpte & VTD_SL_W);

        /*
         * As long as we have either read/write permission, this is a
         * valid entry. The rule works for both page entries and page
         * directory entries.
         */
        entry_valid = read_cur | write_cur;

        if (!vtd_is_last_slpte(slpte, level) && entry_valid) {
            /*
             * This is a valid PDE (or even bigger than PDE). We need
             * to walk one further level.
             */
            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw),
                                      iova, MIN(iova_next, end), level - 1,
                                      read_cur, write_cur, info);
        } else {
            /*
             * This means we are either:
             *
             * (1) the real page entry (either 4K page, or huge page)
             * (2) the whole range is invalid
             *
             * In either case, we send an IOTLB notification down.
             */
            event.entry.target_as = &address_space_memory;
            event.entry.iova = iova & subpage_mask;
            event.entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
            event.entry.addr_mask = ~subpage_mask;
            /* NOTE: this is only meaningful if entry_valid == true */
            event.entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
            event.type = event.entry.perm ? IOMMU_NOTIFIER_MAP :
                                            IOMMU_NOTIFIER_UNMAP;
            ret = vtd_page_walk_one(&event, info);
        }

        if (ret < 0) {
            return ret;
        }

next:
        iova = iova_next;
    }

    return 0;
}
/**
 * vtd_page_walk - walk specific IOVA range, and call the hook
 *
 * @s: intel iommu state
 * @ce: context entry to walk upon
 * @start: IOVA address to start the walk
 * @end: IOVA range end address (start <= addr < end)
 * @info: page walking information struct
 */
static int vtd_page_walk(IntelIOMMUState *s, VTDContextEntry *ce,
                         uint64_t start, uint64_t end,
                         vtd_page_walk_info *info,
                         uint32_t pasid)
{
    dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid);
    uint32_t level = vtd_get_iova_level(s, ce, pasid);

    if (!vtd_iova_range_check(s, start, ce, info->aw, pasid)) {
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    if (!vtd_iova_range_check(s, end, ce, info->aw, pasid)) {
        /* Fix end so that it reaches the maximum */
        end = vtd_iova_limit(s, ce, info->aw, pasid);
    }

    return vtd_page_walk_level(addr, start, end, level, true, true, info);
}
static int vtd_root_entry_rsvd_bits_check(IntelIOMMUState *s,
                                          VTDRootEntry *re)
{
    /* Legacy Mode reserved bits check */
    if (!s->root_scalable &&
        (re->hi || (re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits))))
        goto rsvd_err;

    /* Scalable Mode reserved bits check */
    if (s->root_scalable &&
        ((re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)) ||
         (re->hi & VTD_ROOT_ENTRY_RSVD(s->aw_bits))))
        goto rsvd_err;

    return 0;

rsvd_err:
    error_report_once("%s: invalid root entry: hi=0x%"PRIx64
                      ", lo=0x%"PRIx64,
                      __func__, re->hi, re->lo);
    return -VTD_FR_ROOT_ENTRY_RSVD;
}

static inline int vtd_context_entry_rsvd_bits_check(IntelIOMMUState *s,
                                                    VTDContextEntry *ce)
{
    if (!s->root_scalable &&
        (ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI ||
         ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) {
        error_report_once("%s: invalid context entry: hi=%"PRIx64
                          ", lo=%"PRIx64" (reserved nonzero)",
                          __func__, ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }

    if (s->root_scalable &&
        (ce->val[0] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(s->aw_bits) ||
         ce->val[1] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 ||
         ce->val[2] ||
         ce->val[3])) {
        error_report_once("%s: invalid context entry: val[3]=%"PRIx64
                          ", val[2]=%"PRIx64
                          ", val[1]=%"PRIx64
                          ", val[0]=%"PRIx64" (reserved nonzero)",
                          __func__, ce->val[3], ce->val[2],
                          ce->val[1], ce->val[0]);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }

    return 0;
}

static int vtd_ce_rid2pasid_check(IntelIOMMUState *s,
                                  VTDContextEntry *ce)
{
    VTDPASIDEntry pe;

    /*
     * Make sure in Scalable Mode, a present context entry
     * has valid rid2pasid setting, which includes valid
     * rid2pasid field and corresponding pasid entry setting
     */
    return vtd_ce_get_rid2pasid_entry(s, ce, &pe, PCI_NO_PASID);
}
/* Map a device to its corresponding domain (context-entry) */
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
                                    uint8_t devfn, VTDContextEntry *ce)
{
    VTDRootEntry re;
    int ret_fr;
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    ret_fr = vtd_get_root_entry(s, bus_num, &re);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_root_entry_present(s, &re, devfn)) {
        /* Not error - it's okay we don't have root entry. */
        trace_vtd_re_not_present(bus_num);
        return -VTD_FR_ROOT_ENTRY_P;
    }

    ret_fr = vtd_root_entry_rsvd_bits_check(s, &re);
    if (ret_fr) {
        return ret_fr;
    }

    ret_fr = vtd_get_context_entry_from_root(s, &re, devfn, ce);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_ce_present(ce)) {
        /* Not error - it's okay we don't have context entry. */
        trace_vtd_ce_not_present(bus_num, devfn);
        return -VTD_FR_CONTEXT_ENTRY_P;
    }

    ret_fr = vtd_context_entry_rsvd_bits_check(s, ce);
    if (ret_fr) {
        return ret_fr;
    }

    /* Check if the programming of context-entry is valid */
    if (!s->root_scalable &&
        !vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
        error_report_once("%s: invalid context entry: hi=%"PRIx64
                          ", lo=%"PRIx64" (level %d not supported)",
                          __func__, ce->hi, ce->lo,
                          vtd_ce_get_level(ce));
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    if (!s->root_scalable) {
        /* Do translation type check */
        if (!vtd_ce_type_check(x86_iommu, ce)) {
            /* Errors dumped in vtd_ce_type_check() */
            return -VTD_FR_CONTEXT_ENTRY_INV;
        }
    } else {
        /*
         * Check if the programming of context-entry.rid2pasid
         * and corresponding pasid setting is valid, and thus
         * avoids to check pasid entry fetching result in future
         * helper function calling.
         */
        ret_fr = vtd_ce_rid2pasid_check(s, ce);
        if (ret_fr) {
            return ret_fr;
        }
    }

    return 0;
}
static int vtd_sync_shadow_page_hook(const IOMMUTLBEvent *event,
                                     void *private)
{
    memory_region_notify_iommu(private, 0, *event);
    return 0;
}

static uint16_t vtd_get_domain_id(IntelIOMMUState *s,
                                  VTDContextEntry *ce,
                                  uint32_t pasid)
{
    VTDPASIDEntry pe;

    if (s->root_scalable) {
        vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid);
        return VTD_SM_PASID_ENTRY_DID(pe.val[1]);
    }

    return VTD_CONTEXT_ENTRY_DID(ce->hi);
}

static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as,
                                            VTDContextEntry *ce,
                                            hwaddr addr, hwaddr size)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    vtd_page_walk_info info = {
        .hook_fn = vtd_sync_shadow_page_hook,
        .private = (void *)&vtd_as->iommu,
        .notify_unmap = true,
        .aw = s->aw_bits,
        .as = vtd_as,
        .domain_id = vtd_get_domain_id(s, ce, vtd_as->pasid),
    };

    return vtd_page_walk(s, ce, addr, addr + size, &info, vtd_as->pasid);
}
static int vtd_address_space_sync(VTDAddressSpace *vtd_as)
{
    int ret;
    VTDContextEntry ce;
    IOMMUNotifier *n;

    /* If no MAP notifier registered, we simply invalidate all the cache */
    if (!vtd_as_has_map_notifier(vtd_as)) {
        IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
            memory_region_unmap_iommu_notifier_range(n);
        }
        return 0;
    }

    ret = vtd_dev_to_context_entry(vtd_as->iommu_state,
                                   pci_bus_num(vtd_as->bus),
                                   vtd_as->devfn, &ce);
    if (ret) {
        if (ret == -VTD_FR_CONTEXT_ENTRY_P) {
            /*
             * It's a valid scenario to have a context entry that is
             * not present. For example, when a device is removed
             * from an existing domain then the context entry will be
             * zeroed by the guest before it was put into another
             * domain. When this happens, instead of synchronizing
             * the shadow pages we should invalidate all existing
             * mappings and notify the backends.
             */
            IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
                vtd_address_space_unmap(vtd_as, n);
            }
            ret = 0;
        }
        return ret;
    }

    return vtd_sync_shadow_page_table_range(vtd_as, &ce, 0, UINT64_MAX);
}
/*
 * Check if specific device is configured to bypass address
 * translation for DMA requests. In Scalable Mode, bypass
 * 1st-level translation or 2nd-level translation, it depends
 * on PGTT setting.
 */
static bool vtd_dev_pt_enabled(IntelIOMMUState *s, VTDContextEntry *ce,
                               uint32_t pasid)
{
    VTDPASIDEntry pe;
    int ret;

    if (s->root_scalable) {
        ret = vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid);
        if (ret) {
            /*
             * This error is guest triggerable. We should assume PT
             * not enabled for safety.
             */
            return false;
        }
        return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT);
    }

    return (vtd_ce_get_type(ce) == VTD_CONTEXT_TT_PASS_THROUGH);
}

static bool vtd_as_pt_enabled(VTDAddressSpace *as)
{
    IntelIOMMUState *s;
    VTDContextEntry ce;

    assert(as);

    s = as->iommu_state;
    if (vtd_dev_to_context_entry(s, pci_bus_num(as->bus), as->devfn,
                                 &ce)) {
        /*
         * Possibly failed to parse the context entry for some reason
         * (e.g., during init, or any guest configuration errors on
         * context entries). We should assume PT not enabled for
         * safety.
         */
        return false;
    }

    return vtd_dev_pt_enabled(s, &ce, as->pasid);
}
/* Return whether the device is using IOMMU translation. */
static bool vtd_switch_address_space(VTDAddressSpace *as)
{
    bool use_iommu, pt;
    /* Whether we need to take the BQL on our own */
    bool take_bql = !bql_locked();

    assert(as);

    use_iommu = as->iommu_state->dmar_enabled && !vtd_as_pt_enabled(as);
    pt = as->iommu_state->dmar_enabled && vtd_as_pt_enabled(as);

    trace_vtd_switch_address_space(pci_bus_num(as->bus),
                                   VTD_PCI_SLOT(as->devfn),
                                   VTD_PCI_FUNC(as->devfn),
                                   use_iommu);

    /*
     * It's possible that we reach here without BQL, e.g., when called
     * from vtd_pt_enable_fast_path(). However the memory APIs need
     * it. We'd better make sure we have had it already, or, take it.
     */
    if (take_bql) {
        bql_lock();
    }

    /* Turn off first then on the other */
    if (use_iommu) {
        memory_region_set_enabled(&as->nodmar, false);
        memory_region_set_enabled(MEMORY_REGION(&as->iommu), true);
        /*
         * vt-d spec v3.4 3.14:
         *
         * """
         * Requests-with-PASID with input address in range 0xFEEx_xxxx
         * are translated normally like any other request-with-PASID
         * through DMA-remapping hardware.
         * """
         *
         * Need to disable ir for as with PASID.
         */
        if (as->pasid != PCI_NO_PASID) {
            memory_region_set_enabled(&as->iommu_ir, false);
        } else {
            memory_region_set_enabled(&as->iommu_ir, true);
        }
    } else {
        memory_region_set_enabled(MEMORY_REGION(&as->iommu), false);
        memory_region_set_enabled(&as->nodmar, true);
    }

    /*
     * vtd-spec v3.4 3.14:
     *
     * """
     * Requests-with-PASID with input address in range 0xFEEx_xxxx are
     * translated normally like any other request-with-PASID through
     * DMA-remapping hardware. However, if such a request is processed
     * using pass-through translation, it will be blocked as described
     * in the paragraph below.
     *
     * Software must not program paging-structure entries to remap any
     * address to the interrupt address range. Untranslated requests
     * and translation requests that result in an address in the
     * interrupt range will be blocked with condition code LGN.4 or
     * SGN.8.
     * """
     *
     * We enable per as memory region (iommu_ir_fault) for catching
     * the translation for interrupt range through PASID + PT.
     */
    if (pt && as->pasid != PCI_NO_PASID) {
        memory_region_set_enabled(&as->iommu_ir_fault, true);
    } else {
        memory_region_set_enabled(&as->iommu_ir_fault, false);
    }

    if (take_bql) {
        bql_unlock();
    }

    return use_iommu;
}

static void vtd_switch_address_space_all(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    GHashTableIter iter;

    g_hash_table_iter_init(&iter, s->vtd_address_spaces);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_as)) {
        vtd_switch_address_space(vtd_as);
    }
}
[] = {
1784 [VTD_FR_RESERVED
] = false,
1785 [VTD_FR_ROOT_ENTRY_P
] = false,
1786 [VTD_FR_CONTEXT_ENTRY_P
] = true,
1787 [VTD_FR_CONTEXT_ENTRY_INV
] = true,
1788 [VTD_FR_ADDR_BEYOND_MGAW
] = true,
1789 [VTD_FR_WRITE
] = true,
1790 [VTD_FR_READ
] = true,
1791 [VTD_FR_PAGING_ENTRY_INV
] = true,
1792 [VTD_FR_ROOT_TABLE_INV
] = false,
1793 [VTD_FR_CONTEXT_TABLE_INV
] = false,
1794 [VTD_FR_INTERRUPT_ADDR
] = true,
1795 [VTD_FR_ROOT_ENTRY_RSVD
] = false,
1796 [VTD_FR_PAGING_ENTRY_RSVD
] = true,
1797 [VTD_FR_CONTEXT_ENTRY_TT
] = true,
1798 [VTD_FR_PASID_TABLE_INV
] = false,
1799 [VTD_FR_SM_INTERRUPT_ADDR
] = true,
1800 [VTD_FR_MAX
] = false,
1803 /* To see if a fault condition is "qualified", which is reported to software
1804 * only if the FPD field in the context-entry used to process the faulting
1807 static inline bool vtd_is_qualified_fault(VTDFaultReason fault
)
1809 return vtd_qualified_faults
[fault
];
1812 static inline bool vtd_is_interrupt_addr(hwaddr addr
)
1814 return VTD_INTERRUPT_ADDR_FIRST
<= addr
&& addr
<= VTD_INTERRUPT_ADDR_LAST
;
1817 static gboolean
vtd_find_as_by_sid(gpointer key
, gpointer value
,
1820 struct vtd_as_key
*as_key
= (struct vtd_as_key
*)key
;
1821 uint16_t target_sid
= *(uint16_t *)user_data
;
1822 uint16_t sid
= PCI_BUILD_BDF(pci_bus_num(as_key
->bus
), as_key
->devfn
);
1823 return sid
== target_sid
;
1826 static VTDAddressSpace
*vtd_get_as_by_sid(IntelIOMMUState
*s
, uint16_t sid
)
1828 uint8_t bus_num
= PCI_BUS_NUM(sid
);
1829 VTDAddressSpace
*vtd_as
= s
->vtd_as_cache
[bus_num
];
1832 (sid
== PCI_BUILD_BDF(pci_bus_num(vtd_as
->bus
), vtd_as
->devfn
))) {
1836 vtd_as
= g_hash_table_find(s
->vtd_address_spaces
, vtd_find_as_by_sid
, &sid
);
1837 s
->vtd_as_cache
[bus_num
] = vtd_as
;
1842 static void vtd_pt_enable_fast_path(IntelIOMMUState
*s
, uint16_t source_id
)
1844 VTDAddressSpace
*vtd_as
;
1845 bool success
= false;
1847 vtd_as
= vtd_get_as_by_sid(s
, source_id
);
1852 if (vtd_switch_address_space(vtd_as
) == false) {
1853 /* We switched off IOMMU region successfully. */
1858 trace_vtd_pt_enable_fast_path(source_id
, success
);
1861 static void vtd_report_fault(IntelIOMMUState
*s
,
1862 int err
, bool is_fpd_set
,
1869 if (is_fpd_set
&& vtd_is_qualified_fault(err
)) {
1870 trace_vtd_fault_disabled();
1872 vtd_report_dmar_fault(s
, source_id
, addr
, err
, is_write
,
/* Map dev to context-entry then do a paging-structures walk to do an IOMMU
 * translation.
 *
 * Called from RCU critical section.
 *
 * @bus_num: The bus number
 * @devfn: The devfn, which is the combined of device and function number
 * @is_write: The access is a write operation
 * @entry: IOMMUTLBEntry that contain the addr to be translated and result
 *
 * Returns true if translation is successful, otherwise false.
 */
static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
                                   uint8_t devfn, hwaddr addr, bool is_write,
                                   IOMMUTLBEntry *entry)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    VTDContextEntry ce;
    uint8_t bus_num = pci_bus_num(bus);
    VTDContextCacheEntry *cc_entry;
    uint64_t slpte, page_mask;
    uint32_t level, pasid = vtd_as->pasid;
    uint16_t source_id = PCI_BUILD_BDF(bus_num, devfn);
    int ret_fr;
    bool is_fpd_set = false;
    bool reads = true;
    bool writes = true;
    uint8_t access_flags;
    bool rid2pasid = (pasid == PCI_NO_PASID) && s->root_scalable;
    VTDIOTLBEntry *iotlb_entry;

    /*
     * We have standalone memory region for interrupt addresses, we
     * should never receive translation requests in this region.
     */
    assert(!vtd_is_interrupt_addr(addr));

    vtd_iommu_lock(s);

    cc_entry = &vtd_as->context_cache_entry;

    /* Try to fetch slpte from IOTLB, we don't need RID2PASID logic */
    if (!rid2pasid) {
        iotlb_entry = vtd_lookup_iotlb(s, source_id, pasid, addr);
        if (iotlb_entry) {
            trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
                                     iotlb_entry->domain_id);
            slpte = iotlb_entry->slpte;
            access_flags = iotlb_entry->access_flags;
            page_mask = iotlb_entry->mask;
            goto out;
        }
    }

    /* Try to fetch context-entry from cache first */
    if (cc_entry->context_cache_gen == s->context_cache_gen) {
        trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
                               cc_entry->context_entry.lo,
                               cc_entry->context_cache_gen);
        ce = cc_entry->context_entry;
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (!is_fpd_set && s->root_scalable) {
            ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, pasid);
            if (ret_fr) {
                vtd_report_fault(s, -ret_fr, is_fpd_set,
                                 source_id, addr, is_write,
                                 false, 0);
                goto error;
            }
        }
    } else {
        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (!ret_fr && !is_fpd_set && s->root_scalable) {
            ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, pasid);
        }
        if (ret_fr) {
            vtd_report_fault(s, -ret_fr, is_fpd_set,
                             source_id, addr, is_write,
                             false, 0);
            goto error;
        }
        /* Update context-cache */
        trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
                                  cc_entry->context_cache_gen,
                                  s->context_cache_gen);
        cc_entry->context_entry = ce;
        cc_entry->context_cache_gen = s->context_cache_gen;
    }

    if (rid2pasid) {
        pasid = VTD_CE_GET_RID2PASID(&ce);
    }

    /*
     * We don't need to translate for pass-through context entries.
     * Also, let's ignore IOTLB caching as well for PT devices.
     */
    if (vtd_dev_pt_enabled(s, &ce, pasid)) {
        entry->iova = addr & VTD_PAGE_MASK_4K;
        entry->translated_addr = entry->iova;
        entry->addr_mask = ~VTD_PAGE_MASK_4K;
        entry->perm = IOMMU_RW;
        trace_vtd_translate_pt(source_id, entry->iova);

        /*
         * When this happens, it means firstly caching-mode is not
         * enabled, and this is the first passthrough translation for
         * the device. Let's enable the fast path for passthrough.
         *
         * When passthrough is disabled again for the device, we can
         * capture it via the context entry invalidation, then the
         * IOMMU region can be swapped back.
         */
        vtd_pt_enable_fast_path(s, source_id);
        vtd_iommu_unlock(s);
        return true;
    }

    /* Try to fetch slpte from IOTLB for RID2PASID slow path */
    if (rid2pasid) {
        iotlb_entry = vtd_lookup_iotlb(s, source_id, pasid, addr);
        if (iotlb_entry) {
            trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
                                     iotlb_entry->domain_id);
            slpte = iotlb_entry->slpte;
            access_flags = iotlb_entry->access_flags;
            page_mask = iotlb_entry->mask;
            goto out;
        }
    }

    ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &slpte, &level,
                               &reads, &writes, s->aw_bits, pasid);
    if (ret_fr) {
        vtd_report_fault(s, -ret_fr, is_fpd_set, source_id,
                         addr, is_write, pasid != PCI_NO_PASID, pasid);
        goto error;
    }

    page_mask = vtd_slpt_level_page_mask(level);
    access_flags = IOMMU_ACCESS_FLAG(reads, writes);
    vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce, pasid),
                     addr, slpte, access_flags, level, pasid);
out:
    vtd_iommu_unlock(s);
    entry->iova = addr & page_mask;
    entry->translated_addr = vtd_get_slpte_addr(slpte, s->aw_bits) & page_mask;
    entry->addr_mask = ~page_mask;
    entry->perm = access_flags;
    return true;

error:
    vtd_iommu_unlock(s);
    entry->iova = 0;
    entry->translated_addr = 0;
    entry->addr_mask = 0;
    entry->perm = IOMMU_NONE;
    return false;
}
static void vtd_root_table_setup(IntelIOMMUState *s)
{
    s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
    s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits);

    vtd_update_scalable_state(s);

    trace_vtd_reg_dmar_root(s->root, s->root_scalable);
}

static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
                               uint32_t index, uint32_t mask)
{
    x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
}

static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
{
    uint64_t value;

    value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
    s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
    s->intr_root = value & VTD_IRTA_ADDR_MASK(s->aw_bits);
    s->intr_eime = value & VTD_IRTA_EIME;

    /* Notify global invalidation */
    vtd_iec_notify_all(s, true, 0, 0);

    trace_vtd_reg_ir_root(s->intr_root, s->intr_size);
}
static void vtd_iommu_replay_all(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;

    QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
        vtd_address_space_sync(vtd_as);
    }
}

static void vtd_context_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_cc_global();
    /* Protects context cache */
    vtd_iommu_lock(s);
    s->context_cache_gen++;
    if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
        vtd_reset_context_cache_locked(s);
    }
    vtd_iommu_unlock(s);
    vtd_address_space_refresh_all(s);
    /*
     * From VT-d spec 6.5.2.1, a global context entry invalidation
     * should be followed by an IOTLB global invalidation, so we should
     * be safe even without this. However, let's replay the region as
     * well to be safer, and go back here when we need finer tunes for
     * VT-d emulation codes.
     */
    vtd_iommu_replay_all(s);
}
/* Do a context-cache device-selective invalidation.
 * @func_mask: FM field after shifting
 */
static void vtd_context_device_invalidate(IntelIOMMUState *s,
                                          uint16_t source_id,
                                          uint16_t func_mask)
{
    GHashTableIter as_it;
    uint16_t mask;
    VTDAddressSpace *vtd_as;
    uint8_t bus_n, devfn;

    trace_vtd_inv_desc_cc_devices(source_id, func_mask);

    switch (func_mask & 3) {
    case 0:
        mask = 0;   /* No bits in the SID field masked */
        break;
    case 1:
        mask = 4;   /* Mask bit 2 in the SID field */
        break;
    case 2:
        mask = 6;   /* Mask bit 2:1 in the SID field */
        break;
    case 3:
        mask = 7;   /* Mask bit 2:0 in the SID field */
        break;
    default:
        g_assert_not_reached();
    }
    mask = ~mask;

    bus_n = VTD_SID_TO_BUS(source_id);
    devfn = VTD_SID_TO_DEVFN(source_id);

    g_hash_table_iter_init(&as_it, s->vtd_address_spaces);
    while (g_hash_table_iter_next(&as_it, NULL, (void **)&vtd_as)) {
        if ((pci_bus_num(vtd_as->bus) == bus_n) &&
            (vtd_as->devfn & mask) == (devfn & mask)) {
            trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(vtd_as->devfn),
                                         VTD_PCI_FUNC(vtd_as->devfn));
            vtd_iommu_lock(s);
            vtd_as->context_cache_entry.context_cache_gen = 0;
            vtd_iommu_unlock(s);
            /*
             * Do switch address space when needed, in case if the
             * device passthrough bit is switched.
             */
            vtd_switch_address_space(vtd_as);
            /*
             * So a device is moving out of (or moving into) a
             * domain, resync the shadow page table.
             * This won't bring bad even if we have no such
             * notifier registered - the IOMMU notification
             * framework will skip MAP notifications if that
             * happened.
             */
            vtd_address_space_sync(vtd_as);
        }
    }
}
2161 * Returns the Context Actual Invalidation Granularity.
2162 * @val: the content of the CCMD_REG
2164 static uint64_t vtd_context_cache_invalidate(IntelIOMMUState
*s
, uint64_t val
)
2167 uint64_t type
= val
& VTD_CCMD_CIRG_MASK
;
2170 case VTD_CCMD_DOMAIN_INVL
:
2172 case VTD_CCMD_GLOBAL_INVL
:
2173 caig
= VTD_CCMD_GLOBAL_INVL_A
;
2174 vtd_context_global_invalidate(s
);
2177 case VTD_CCMD_DEVICE_INVL
:
2178 caig
= VTD_CCMD_DEVICE_INVL_A
;
2179 vtd_context_device_invalidate(s
, VTD_CCMD_SID(val
), VTD_CCMD_FM(val
));
2183 error_report_once("%s: invalid context: 0x%" PRIx64
,

static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_iotlb_global();
    vtd_reset_iotlb(s);
    vtd_iommu_replay_all(s);
}

static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
{
    VTDContextEntry ce;
    VTDAddressSpace *vtd_as;

    trace_vtd_inv_desc_iotlb_domain(domain_id);

    vtd_iommu_lock(s);
    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
                                &domain_id);
    vtd_iommu_unlock(s);

    QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
        if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                      vtd_as->devfn, &ce) &&
            domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) {
            vtd_address_space_sync(vtd_as);
        }
    }
}

static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
                                             uint16_t domain_id, hwaddr addr,
                                             uint8_t am, uint32_t pasid)
{
    VTDAddressSpace *vtd_as;
    VTDContextEntry ce;
    int ret;
    hwaddr size = (1 << am) * VTD_PAGE_SIZE;
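    /* "am" is an address-mask exponent: the invalidation covers 2^am 4K pages. */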

    QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) {
        if (pasid != PCI_NO_PASID && pasid != vtd_as->pasid) {
            continue;
        }

        ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                       vtd_as->devfn, &ce);
        if (!ret && domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) {
            if (vtd_as_has_map_notifier(vtd_as)) {
                /*
                 * As long as we have MAP notifications registered in
                 * any of our IOMMU notifiers, we need to sync the
                 * shadow page table.
                 */
                vtd_sync_shadow_page_table_range(vtd_as, &ce, addr, size);
            } else {
                /*
                 * For UNMAP-only notifiers, we don't need to walk the
                 * page tables. We just deliver the PSI down to
                 * invalidate caches.
                 */
                const IOMMUTLBEvent event = {
                    .type = IOMMU_NOTIFIER_UNMAP,
                    .entry = {
                        .target_as = &address_space_memory,
                        .iova = addr,
                        .translated_addr = 0,
                        .addr_mask = size - 1,
                        .perm = IOMMU_NONE,
                    },
                };
                memory_region_notify_iommu(&vtd_as->iommu, 0, event);
            }
        }
    }
}

static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
                                      hwaddr addr, uint8_t am)
{
    VTDIOTLBPageInvInfo info;

    trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);

    assert(am <= VTD_MAMV);
    info.domain_id = domain_id;
    info.addr = addr;
    info.mask = ~((1 << am) - 1);
    vtd_iommu_lock(s);
    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
    vtd_iommu_unlock(s);
    vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am, PCI_NO_PASID);
}

/*
 * Returns the IOTLB Actual Invalidation Granularity.
 * @val: the content of the IOTLB_REG
 */
static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
{
    uint64_t iaig;
    uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
    uint16_t domain_id;
    hwaddr addr;
    uint8_t am;

    switch (type) {
    case VTD_TLB_GLOBAL_FLUSH:
        iaig = VTD_TLB_GLOBAL_FLUSH_A;
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_TLB_DSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        iaig = VTD_TLB_DSI_FLUSH_A;
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_TLB_PSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
        am = VTD_IVA_AM(addr);
        addr = VTD_IVA_ADDR(addr);
        if (am > VTD_MAMV) {
            error_report_once("%s: address mask overflow: 0x%" PRIx64,
                              __func__, vtd_get_quad_raw(s, DMAR_IVA_REG));
            iaig = 0;
            break;
        }
        iaig = VTD_TLB_PSI_FLUSH_A;
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        error_report_once("%s: invalid granularity: 0x%" PRIx64,
                          __func__, val);
        iaig = 0;
    }
    return iaig;
}

static void vtd_fetch_inv_desc(IntelIOMMUState *s);

static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
{
    return s->qi_enabled && (s->iq_tail == s->iq_head) &&
           (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
}

static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
{
    uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);

    trace_vtd_inv_qi_enable(en);

    if (en) {
        s->iq = iqa_val & VTD_IQA_IQA_MASK(s->aw_bits);
        /* 2^(x+8) entries */
        s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8 - (s->iq_dw ? 1 : 0));
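        /*
         * e.g. QS=0 gives 256 entries with 128-bit descriptors, or 128
         * entries when 256-bit descriptors (iq_dw) are in use.
         */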
        s->qi_enabled = true;
        trace_vtd_inv_qi_setup(s->iq, s->iq_size);
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);

        if (s->iq_tail != 0) {
            /*
             * This is a spec violation but Windows guests are known to set up
             * Queued Invalidation this way so we allow the write and process
             * Invalidation Descriptors right away.
             */
            trace_vtd_warn_invalid_qi_tail(s->iq_tail);
            if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
                vtd_fetch_inv_desc(s);
            }
        }
    } else {
        if (vtd_queued_inv_disable_check(s)) {
            /* disable Queued Invalidation */
            vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
            s->iq_head = 0;
            s->qi_enabled = false;
            /* Ok - report back to driver */
            vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
        } else {
            error_report_once("%s: detected improper state when disable QI "
                              "(head=0x%x, tail=0x%x, last_type=%d)",
                              __func__,
                              s->iq_head, s->iq_tail, s->iq_last_desc_type);
        }
    }
}

/* Set Root Table Pointer */
static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
{
    vtd_root_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
    vtd_reset_caches(s);
    vtd_address_space_refresh_all(s);
}

/* Set Interrupt Remap Table Pointer */
static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
{
    vtd_interrupt_remap_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
}

/* Handle Translation Enable/Disable */
static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
{
    if (s->dmar_enabled == en) {
        return;
    }

    trace_vtd_dmar_enable(en);

    if (en) {
        s->dmar_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
    } else {
        s->dmar_enabled = false;

        /* Clear the index of Fault Recording Register */
        s->next_frcd_reg = 0;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
    }

    vtd_reset_caches(s);
    vtd_address_space_refresh_all(s);
}

/* Handle Interrupt Remap Enable/Disable */
static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
{
    trace_vtd_ir_enable(en);

    if (en) {
        s->intr_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
    } else {
        s->intr_enabled = false;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
    }
}

/* Handle write to Global Command Register */
static void vtd_handle_gcmd_write(IntelIOMMUState *s)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
    uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
    uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
    uint32_t changed = status ^ val;

    trace_vtd_reg_write_gcmd(status, val);
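    /*
     * TE/QIE/IRE are only acted on when the bit actually changes, while
     * SRTP/SIRTP below take effect on every write of 1.
     */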
    if ((changed & VTD_GCMD_TE) && s->dma_translation) {
        /* Translation enable/disable */
        vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
    }
    if (val & VTD_GCMD_SRTP) {
        /* Set/update the root-table pointer */
        vtd_handle_gcmd_srtp(s);
    }
    if (changed & VTD_GCMD_QIE) {
        /* Queued Invalidation Enable */
        vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
    }
    if (val & VTD_GCMD_SIRTP) {
        /* Set/update the interrupt remapping root-table pointer */
        vtd_handle_gcmd_sirtp(s);
    }
    if ((changed & VTD_GCMD_IRE) &&
        x86_iommu_ir_supported(x86_iommu)) {
        /* Interrupt remap enable/disable */
        vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
    }
}

/* Handle write to Context Command Register */
static void vtd_handle_ccmd_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);

    /* Context-cache invalidation request */
    if (val & VTD_CCMD_ICC) {
        if (s->qi_enabled) {
            error_report_once("Queued Invalidation enabled, "
                              "should not use register-based invalidation");
            return;
        }
        ret = vtd_context_cache_invalidate(s, val);
        /* Invalidation completed. Change something to show */
        vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
                                      ret);
    }
}

/* Handle write to IOTLB Invalidation Register */
static void vtd_handle_iotlb_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);

    /* IOTLB invalidation request */
    if (val & VTD_TLB_IVT) {
        if (s->qi_enabled) {
            error_report_once("Queued Invalidation enabled, "
                              "should not use register-based invalidation");
            return;
        }
        ret = vtd_iotlb_flush(s, val);
        /* Invalidation completed. Change something to show */
        vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
                                      VTD_TLB_FLUSH_GRANU_MASK_A, ret);
    }
}

/* Fetch an Invalidation Descriptor from the Invalidation Queue */
static bool vtd_get_inv_desc(IntelIOMMUState *s,
                             VTDInvDesc *inv_desc)
{
    dma_addr_t base_addr = s->iq;
    uint32_t offset = s->iq_head;
    uint32_t dw = s->iq_dw ? 32 : 16;
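    /* Each descriptor is 16 bytes (128-bit) or, with iq_dw set, 32 bytes (256-bit). */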
    dma_addr_t addr = base_addr + offset * dw;

    if (dma_memory_read(&address_space_memory, addr,
                        inv_desc, dw, MEMTXATTRS_UNSPECIFIED)) {
        error_report_once("Read INV DESC failed.");
        return false;
    }
    inv_desc->lo = le64_to_cpu(inv_desc->lo);
    inv_desc->hi = le64_to_cpu(inv_desc->hi);
    if (dw == 32) {
        inv_desc->val[2] = le64_to_cpu(inv_desc->val[2]);
        inv_desc->val[3] = le64_to_cpu(inv_desc->val[3]);
    }
    return true;
}

static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
        (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
        error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
                          " (reserved nonzero)", __func__, inv_desc->hi,
                          inv_desc->lo);
        return false;
    }

    if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
        /* Status Write */
        uint32_t status_data = (uint32_t)(inv_desc->lo >>
                               VTD_INV_DESC_WAIT_DATA_SHIFT);

        assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));

        /* FIXME: need to be masked with HAW? */
        dma_addr_t status_addr = inv_desc->hi;
        trace_vtd_inv_desc_wait_sw(status_addr, status_data);
        status_data = cpu_to_le32(status_data);
        if (dma_memory_write(&address_space_memory, status_addr,
                             &status_data, sizeof(status_data),
                             MEMTXATTRS_UNSPECIFIED)) {
            trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
            return false;
        }
    } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
        /* Interrupt flag */
        vtd_generate_completion_event(s);
    } else {
        error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
                          " (unknown type)", __func__, inv_desc->hi,
                          inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
                                           VTDInvDesc *inv_desc)
{
    uint16_t sid, fmask;

    if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
        error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64
                          " (reserved nonzero)", __func__, inv_desc->hi,
                          inv_desc->lo);
        return false;
    }

    switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
    case VTD_INV_DESC_CC_DOMAIN:
        trace_vtd_inv_desc_cc_domain(
            (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
        /* Fall through */
    case VTD_INV_DESC_CC_GLOBAL:
        vtd_context_global_invalidate(s);
        break;

    case VTD_INV_DESC_CC_DEVICE:
        sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
        fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
        vtd_context_device_invalidate(s, sid, fmask);
        break;

    default:
        error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64
                          " (invalid type)", __func__, inv_desc->hi,
                          inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    uint16_t domain_id;
    uint8_t am;
    hwaddr addr;

    if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
        error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
                          ", lo=0x%"PRIx64" (reserved bits unzero)",
                          __func__, inv_desc->hi, inv_desc->lo);
        return false;
    }

    switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
    case VTD_INV_DESC_IOTLB_GLOBAL:
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_INV_DESC_IOTLB_DOMAIN:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_INV_DESC_IOTLB_PAGE:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
        am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
        if (am > VTD_MAMV) {
            error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
                              ", lo=0x%"PRIx64" (am=%u > VTD_MAMV=%u)",
                              __func__, inv_desc->hi, inv_desc->lo,
                              am, (unsigned)VTD_MAMV);
            return false;
        }
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
                          ", lo=0x%"PRIx64" (type mismatch: 0x%llx)",
                          __func__, inv_desc->hi, inv_desc->lo,
                          inv_desc->lo & VTD_INV_DESC_IOTLB_G);
        return false;
    }
    return true;
}

static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
                                     VTDInvDesc *inv_desc)
{
    trace_vtd_inv_desc_iec(inv_desc->iec.granularity,
                           inv_desc->iec.index,
                           inv_desc->iec.index_mask);

    vtd_iec_notify_all(s, !inv_desc->iec.granularity,
                       inv_desc->iec.index,
                       inv_desc->iec.index_mask);
    return true;
}

static void do_invalidate_device_tlb(VTDAddressSpace *vtd_dev_as,
                                     bool size, hwaddr addr)
{
    /*
     * According to ATS spec table 2.4:
     * S = 0, bits 15:12 = xxxx     range size: 4K
     * S = 1, bits 15:12 = xxx0     range size: 8K
     * S = 1, bits 15:12 = xx01     range size: 16K
     * S = 1, bits 15:12 = x011     range size: 32K
     * S = 1, bits 15:12 = 0111     range size: 64K
     */

    IOMMUTLBEvent event;
    uint64_t sz;

    if (size) {
        sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
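        /*
         * e.g. S=1 with address bits 15:12 = x011 has two trailing ones,
         * so sz = 8K << 2 = 32K, matching the table above.
         */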
        addr &= ~(sz - 1);
    } else {
        sz = VTD_PAGE_SIZE;
    }

    event.type = IOMMU_NOTIFIER_DEVIOTLB_UNMAP;
    event.entry.target_as = &vtd_dev_as->as;
    event.entry.addr_mask = sz - 1;
    event.entry.iova = addr;
    event.entry.perm = IOMMU_NONE;
    event.entry.translated_addr = 0;
    memory_region_notify_iommu(&vtd_dev_as->iommu, 0, event);
}

static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
                                          VTDInvDesc *inv_desc)
{
    VTDAddressSpace *vtd_dev_as;
    hwaddr addr;
    uint16_t sid;
    bool size;

    addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
    sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
    size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);

    if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
        error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64
                          ", lo=%"PRIx64" (reserved nonzero)", __func__,
                          inv_desc->hi, inv_desc->lo);
        return false;
    }

    /*
     * Using sid is OK since the guest should have finished the
     * initialization of both the bus and device.
     */
    vtd_dev_as = vtd_get_as_by_sid(s, sid);
    if (!vtd_dev_as) {
        goto done;
    }

    do_invalidate_device_tlb(vtd_dev_as, size, addr);

done:
    return true;
}

static bool vtd_process_inv_desc(IntelIOMMUState *s)
{
    VTDInvDesc inv_desc;
    uint8_t desc_type;

    trace_vtd_inv_qi_head(s->iq_head);
    if (!vtd_get_inv_desc(s, &inv_desc)) {
        s->iq_last_desc_type = VTD_INV_DESC_NONE;
        return false;
    }

    desc_type = VTD_INV_DESC_TYPE(inv_desc.lo);
    /* FIXME: should update at first or at last? */
    s->iq_last_desc_type = desc_type;

    switch (desc_type) {
    case VTD_INV_DESC_CC:
        trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_context_cache_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IOTLB:
        trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_WAIT:
        trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_wait_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IEC:
        trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_DEVICE:
        trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    /*
     * TODO: the below two cases will be implemented in a future series.
     * To make guests (which integrate the scalable mode support patch set
     * in the iommu driver) work, just returning true is enough so far.
     */
    case VTD_INV_DESC_PC:
    case VTD_INV_DESC_PIOTLB:
        if (s->scalable_mode) {
            break;
        }
        /* fall through */

    default:
        error_report_once("%s: invalid inv desc: hi=%"PRIx64", lo=%"PRIx64
                          " (unknown type)", __func__, inv_desc.hi,
                          inv_desc.lo);
        return false;
    }

    s->iq_head++;
    if (s->iq_head == s->iq_size) {
        s->iq_head = 0;
    }
    return true;
}

/* Try to fetch and process more Invalidation Descriptors */
static void vtd_fetch_inv_desc(IntelIOMMUState *s)
{
    int qi_shift;

    /* Refer to 10.4.23 of VT-d spec 3.0 */
    qi_shift = s->iq_dw ? VTD_IQH_QH_SHIFT_5 : VTD_IQH_QH_SHIFT_4;

    trace_vtd_inv_qi_fetch();

    if (s->iq_tail >= s->iq_size) {
        /* Detects an invalid Tail pointer */
        error_report_once("%s: detected invalid QI tail "
                          "(tail=0x%x, size=0x%x)",
                          __func__, s->iq_tail, s->iq_size);
        vtd_handle_inv_queue_error(s);
        return;
    }
    while (s->iq_head != s->iq_tail) {
        if (!vtd_process_inv_desc(s)) {
            /* Invalidation Queue Errors */
            vtd_handle_inv_queue_error(s);
            break;
        }
        /* Must update the IQH_REG in time */
        vtd_set_quad_raw(s, DMAR_IQH_REG,
                         (((uint64_t)(s->iq_head)) << qi_shift) &
                         VTD_IQH_QH_MASK);
    }
}

/* Handle write to Invalidation Queue Tail Register */
static void vtd_handle_iqt_write(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);

    if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) {
        error_report_once("%s: RSV bit is set: val=0x%"PRIx64,
                          __func__, val);
        return;
    }
    s->iq_tail = VTD_IQT_QT(s->iq_dw, val);
    trace_vtd_inv_qi_tail(s->iq_tail);

    if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
        /* Process Invalidation Queue here */
        vtd_fetch_inv_desc(s);
    }
}

static void vtd_handle_fsts_write(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
    uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;

    if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        trace_vtd_fsts_clear_ip();
    }
    /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
     * Descriptors if there are any when Queued Invalidation is enabled?
     */
}

static void vtd_handle_fectl_write(IntelIOMMUState *s)
{
    uint32_t fectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);

    trace_vtd_reg_write_fectl(fectl_reg);

    if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}

static void vtd_handle_ics_write(IntelIOMMUState *s)
{
    uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
    uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
        trace_vtd_reg_ics_clear_ip();
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

static void vtd_handle_iectl_write(IntelIOMMUState *s)
{
    uint32_t iectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    trace_vtd_reg_write_iectl(iectl_reg);

    if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    IntelIOMMUState *s = opaque;
    uint64_t val;

    trace_vtd_reg_read(addr, size);

    if (addr + size > DMAR_REG_SIZE) {
        error_report_once("%s: MMIO over range: addr=0x%" PRIx64
                          " size=0x%x", __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (addr) {
    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        val = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        val = vtd_get_quad_raw(s, DMAR_RTADDR_REG) >> 32;
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        val = s->iq |
              (vtd_get_quad(s, DMAR_IQA_REG) &
               (VTD_IQA_QS | VTD_IQA_DW_MASK));
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        val = s->iq >> 32;
        break;

    default:
        if (size == 4) {
            val = vtd_get_long(s, addr);
        } else {
            val = vtd_get_quad(s, addr);
        }
    }

    return val;
}

static void vtd_mem_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    IntelIOMMUState *s = opaque;

    trace_vtd_reg_write(addr, size, val);

    if (addr + size > DMAR_REG_SIZE) {
        error_report_once("%s: MMIO over range: addr=0x%" PRIx64
                          " size=0x%x", __func__, addr, size);
        return;
    }

    switch (addr) {
    /* Global Command Register, 32-bit */
    case DMAR_GCMD_REG:
        vtd_set_long(s, addr, val);
        vtd_handle_gcmd_write(s);
        break;

    /* Context Command Register, 64-bit */
    case DMAR_CCMD_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_ccmd_write(s);
        }
        break;

    case DMAR_CCMD_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ccmd_write(s);
        break;

    /* IOTLB Invalidation Register, 64-bit */
    case DMAR_IOTLB_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_iotlb_write(s);
        }
        break;

    case DMAR_IOTLB_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iotlb_write(s);
        break;

    /* Invalidate Address Register, 64-bit */
    case DMAR_IVA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IVA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Status Register, 32-bit */
    case DMAR_FSTS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fsts_write(s);
        break;

    /* Fault Event Control Register, 32-bit */
    case DMAR_FECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fectl_write(s);
        break;

    /* Fault Event Data Register, 32-bit */
    case DMAR_FEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Address Register, 32-bit */
    case DMAR_FEADDR_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            /*
             * While the register is 32-bit only, some guests (Xen...) write to
             * it with 64-bit.
             */
            vtd_set_quad(s, addr, val);
        }
        break;

    /* Fault Event Upper Address Register, 32-bit */
    case DMAR_FEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Protected Memory Enable Register, 32-bit */
    case DMAR_PMEN_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Queue Tail Register, 64-bit */
    case DMAR_IQT_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        vtd_handle_iqt_write(s);
        break;

    case DMAR_IQT_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* 19:63 of IQT_REG is RsvdZ, do nothing here */
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        vtd_update_iq_dw(s);
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Completion Status Register, 32-bit */
    case DMAR_ICS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ics_write(s);
        break;

    /* Invalidation Event Control Register, 32-bit */
    case DMAR_IECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iectl_write(s);
        break;

    /* Invalidation Event Data Register, 32-bit */
    case DMAR_IEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Address Register, 32-bit */
    case DMAR_IEADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Upper Address Register, 32-bit */
    case DMAR_IEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Recording Registers, 128-bit */
    case DMAR_FRCD_REG_0_0:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_FRCD_REG_0_1:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    case DMAR_FRCD_REG_0_2:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            /* May clear bit 127 (Fault), update PPF */
            vtd_update_fsts_ppf(s);
        }
        break;

    case DMAR_FRCD_REG_0_3:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* May clear bit 127 (Fault), update PPF */
        vtd_update_fsts_ppf(s);
        break;

    case DMAR_IRTA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IRTA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    default:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
    }
}

static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                         IOMMUAccessFlags flag, int iommu_idx)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IOMMUTLBEntry iotlb = {
        /* We'll fill in the rest later. */
        .target_as = &address_space_memory,
    };
    bool success;

    if (likely(s->dmar_enabled)) {
        success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
                                         addr, flag & IOMMU_WO, &iotlb);
    } else {
        /* DMAR disabled, passthrough, use 4k-page */
        iotlb.iova = addr & VTD_PAGE_MASK_4K;
        iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
        iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
        iotlb.perm = IOMMU_RW;
        success = true;
    }

    if (likely(success)) {
        trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus),
                                 VTD_PCI_SLOT(vtd_as->devfn),
                                 VTD_PCI_FUNC(vtd_as->devfn),
                                 iotlb.iova, iotlb.translated_addr,
                                 iotlb.addr_mask);
    } else {
        error_report_once("%s: detected translation failure "
                          "(dev=%02x:%02x:%02x, iova=0x%" PRIx64 ")",
                          __func__, pci_bus_num(vtd_as->bus),
                          VTD_PCI_SLOT(vtd_as->devfn),
                          VTD_PCI_FUNC(vtd_as->devfn),
                          addr);
    }

    return iotlb;
}

static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                         IOMMUNotifierFlag old,
                                         IOMMUNotifierFlag new,
                                         Error **errp)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    /* TODO: add support for VFIO and vhost users */
    if (s->snoop_control) {
        error_setg_errno(errp, ENOTSUP,
                         "Snoop Control with vhost or VFIO is not supported");
        return -ENOTSUP;
    }
    if (!s->caching_mode && (new & IOMMU_NOTIFIER_MAP)) {
        error_setg_errno(errp, ENOTSUP,
                         "device %02x.%02x.%x requires caching mode",
                         pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn),
                         PCI_FUNC(vtd_as->devfn));
        return -ENOTSUP;
    }
    if (!x86_iommu->dt_supported && (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP)) {
        error_setg_errno(errp, ENOTSUP,
                         "device %02x.%02x.%x requires device IOTLB mode",
                         pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn),
                         PCI_FUNC(vtd_as->devfn));
        return -ENOTSUP;
    }

    /* Update per-address-space notifier flags */
    vtd_as->notifier_flags = new;

    if (old == IOMMU_NOTIFIER_NONE) {
        QLIST_INSERT_HEAD(&s->vtd_as_with_notifiers, vtd_as, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        QLIST_REMOVE(vtd_as, next);
    }
    return 0;
}

static int vtd_post_load(void *opaque, int version_id)
{
    IntelIOMMUState *iommu = opaque;

    /*
     * We don't need to migrate the root_scalable because we can
     * simply do the calculation after the loading is complete. We
     * could do similar things with root, dmar_enabled, etc., but
     * since we already migrate them we keep them for migration
     * compatibility.
     */
    vtd_update_scalable_state(iommu);

    vtd_update_iq_dw(iommu);

    /*
     * Memory regions are dynamically turned on/off depending on
     * context entry configurations from the guest. After migration,
     * we need to make sure the memory regions are still correct.
     */
    vtd_switch_address_space_all(iommu);

    return 0;
}

static const VMStateDescription vtd_vmstate = {
    .name = "iommu-intel",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .post_load = vtd_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(root, IntelIOMMUState),
        VMSTATE_UINT64(intr_root, IntelIOMMUState),
        VMSTATE_UINT64(iq, IntelIOMMUState),
        VMSTATE_UINT32(intr_size, IntelIOMMUState),
        VMSTATE_UINT16(iq_head, IntelIOMMUState),
        VMSTATE_UINT16(iq_tail, IntelIOMMUState),
        VMSTATE_UINT16(iq_size, IntelIOMMUState),
        VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
        VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
        VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
        VMSTATE_UNUSED(1),      /* bool root_extended is obsolete by VT-d */
        VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
        VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_eime, IntelIOMMUState),
        VMSTATE_END_OF_LIST()
    }
};

static const MemoryRegionOps vtd_mem_ops = {
    .read = vtd_mem_read,
    .write = vtd_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static Property vtd_properties[] = {
    DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
    DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
    DEFINE_PROP_UINT8("aw-bits", IntelIOMMUState, aw_bits,
                      VTD_HOST_ADDRESS_WIDTH),
    DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
    DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE),
    DEFINE_PROP_BOOL("snoop-control", IntelIOMMUState, snoop_control, false),
    DEFINE_PROP_BOOL("x-pasid-mode", IntelIOMMUState, pasid, false),
    DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true),
    DEFINE_PROP_BOOL("dma-translation", IntelIOMMUState, dma_translation, true),
    DEFINE_PROP_END_OF_LIST(),
};

/* Read IRTE entry with specific index */
static bool vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
                         VTD_IR_TableEntry *entry, uint16_t sid,
                         bool do_fault)
{
    static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \
        {0xffff, 0xfffb, 0xfff9, 0xfff8};
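    /*
     * Per-SQ masks for source-id verification: 0xffff requires an exact
     * match, the others ignore function bits 2, 2:1 or 2:0 respectively.
     */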
    dma_addr_t addr = 0x00;
    uint16_t mask, source_id;
    uint8_t bus, bus_max, bus_min;

    if (index >= iommu->intr_size) {
        error_report_once("%s: index too large: ind=0x%x",
                          __func__, index);
        if (do_fault) {
            vtd_report_ir_fault(iommu, sid, VTD_FR_IR_INDEX_OVER, index);
        }
        return false;
    }

    addr = iommu->intr_root + index * sizeof(*entry);
    if (dma_memory_read(&address_space_memory, addr,
                        entry, sizeof(*entry), MEMTXATTRS_UNSPECIFIED)) {
        error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64,
                          __func__, index, addr);
        if (do_fault) {
            vtd_report_ir_fault(iommu, sid, VTD_FR_IR_ROOT_INVAL, index);
        }
        return false;
    }

    entry->data[0] = le64_to_cpu(entry->data[0]);
    entry->data[1] = le64_to_cpu(entry->data[1]);

    trace_vtd_ir_irte_get(index, entry->data[1], entry->data[0]);

    /*
     * The remaining potential fault conditions are "qualified" by the
     * Fault Processing Disable bit in the IRTE. Even "not present".
     * So just clear the do_fault flag if PFD is set, which will
     * prevent faults being raised.
     */
    if (entry->irte.fault_disable) {
        do_fault = false;
    }

    if (!entry->irte.present) {
        error_report_once("%s: detected non-present IRTE "
                          "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
                          __func__, index, entry->data[1], entry->data[0]);
        if (do_fault) {
            vtd_report_ir_fault(iommu, sid, VTD_FR_IR_ENTRY_P, index);
        }
        return false;
    }

    if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
        entry->irte.__reserved_2) {
        error_report_once("%s: detected non-zero reserved IRTE "
                          "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
                          __func__, index, entry->data[1], entry->data[0]);
        if (do_fault) {
            vtd_report_ir_fault(iommu, sid, VTD_FR_IR_IRTE_RSVD, index);
        }
        return false;
    }

    if (sid != X86_IOMMU_SID_INVALID) {
        /* Validate IRTE SID */
        source_id = entry->irte.source_id;
        switch (entry->irte.sid_vtype) {
        case VTD_SVT_NONE:
            break;

        case VTD_SVT_ALL:
            mask = vtd_svt_mask[entry->irte.sid_q];
            if ((source_id & mask) != (sid & mask)) {
                error_report_once("%s: invalid IRTE SID "
                                  "(index=%u, sid=%u, source_id=%u)",
                                  __func__, index, sid, source_id);
                if (do_fault) {
                    vtd_report_ir_fault(iommu, sid, VTD_FR_IR_SID_ERR, index);
                }
                return false;
            }
            break;

        case VTD_SVT_BUS:
            bus_max = source_id >> 8;
            bus_min = source_id & 0xff;
            bus = sid >> 8;
            if (bus > bus_max || bus < bus_min) {
                error_report_once("%s: invalid SVT_BUS "
                                  "(index=%u, bus=%u, min=%u, max=%u)",
                                  __func__, index, bus, bus_min, bus_max);
                if (do_fault) {
                    vtd_report_ir_fault(iommu, sid, VTD_FR_IR_SID_ERR, index);
                }
                return false;
            }
            break;

        default:
            error_report_once("%s: detected invalid IRTE SVT "
                              "(index=%u, type=%d)", __func__,
                              index, entry->irte.sid_vtype);
            /* Take this as verification failure. */
            if (do_fault) {
                vtd_report_ir_fault(iommu, sid, VTD_FR_IR_SID_ERR, index);
            }
            return false;
        }
    }

    return true;
}

/* Fetch IRQ information of specific IR index */
static bool vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
                              X86IOMMUIrq *irq, uint16_t sid, bool do_fault)
{
    VTD_IR_TableEntry irte = {};

    if (!vtd_irte_get(iommu, index, &irte, sid, do_fault)) {
        return false;
    }

    irq->trigger_mode = irte.irte.trigger_mode;
    irq->vector = irte.irte.vector;
    irq->delivery_mode = irte.irte.delivery_mode;
    irq->dest = irte.irte.dest_id;
    if (!iommu->intr_eime) {
#define  VTD_IR_APIC_DEST_MASK         (0xff00ULL)
#define  VTD_IR_APIC_DEST_SHIFT        (8)
        irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
            VTD_IR_APIC_DEST_SHIFT;
    }
    irq->dest_mode = irte.irte.dest_mode;
    irq->redir_hint = irte.irte.redir_hint;

    trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
                       irq->delivery_mode, irq->dest, irq->dest_mode);

    return true;
}

/* Interrupt remapping for MSI/MSI-X entry */
static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
                                   MSIMessage *origin,
                                   MSIMessage *translated,
                                   uint16_t sid, bool do_fault)
{
    VTD_IR_MSIAddress addr;
    uint16_t index;
    X86IOMMUIrq irq = {};

    assert(origin && translated);

    trace_vtd_ir_remap_msi_req(origin->address, origin->data);

    if (!iommu || !iommu->intr_enabled) {
        memcpy(translated, origin, sizeof(*origin));
        return 0;
    }

    if (origin->address & VTD_MSI_ADDR_HI_MASK) {
        error_report_once("%s: MSI address high 32 bits non-zero detected: "
                          "address=0x%" PRIx64, __func__, origin->address);
        if (do_fault) {
            vtd_report_ir_fault(iommu, sid, VTD_FR_IR_REQ_RSVD, 0);
        }
        return -EINVAL;
    }

    addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
    if (addr.addr.__head != 0xfee) {
        error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32,
                          __func__, addr.data);
        if (do_fault) {
            vtd_report_ir_fault(iommu, sid, VTD_FR_IR_REQ_RSVD, 0);
        }
        return -EINVAL;
    }

    /* This is compatible mode. */
    if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
        memcpy(translated, origin, sizeof(*origin));
        return 0;
    }

    index = addr.addr.index_h << 15 | addr.addr.index_l;
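    /*
     * The 16-bit interrupt index is split across the MSI address:
     * index_l supplies bits 14:0 and index_h supplies bit 15.
     */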

#define  VTD_IR_MSI_DATA_SUBHANDLE       (0x0000ffff)
#define  VTD_IR_MSI_DATA_RESERVED        (0xffff0000)

    if (addr.addr.sub_valid) {
        /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
        index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
    }

    if (!vtd_remap_irq_get(iommu, index, &irq, sid, do_fault)) {
        return -EINVAL;
    }

    if (addr.addr.sub_valid) {
        trace_vtd_ir_remap_type("MSI");
        if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
            error_report_once("%s: invalid IR MSI "
                              "(sid=%u, address=0x%" PRIx64
                              ", data=0x%" PRIx32 ")",
                              __func__, sid, origin->address, origin->data);
            if (do_fault) {
                vtd_report_ir_fault(iommu, sid, VTD_FR_IR_REQ_RSVD, 0);
            }
            return -EINVAL;
        }
    } else {
        uint8_t vector = origin->data & 0xff;
        uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

        trace_vtd_ir_remap_type("IOAPIC");
        /* IOAPIC entry vector should be aligned with IRTE vector
         * (see vt-d spec 5.1.5.1). */
        if (vector != irq.vector) {
            trace_vtd_warn_ir_vector(sid, index, vector, irq.vector);
        }

        /* The Trigger Mode field must match the Trigger Mode in the IRTE.
         * (see vt-d spec 5.1.5.1). */
        if (trigger_mode != irq.trigger_mode) {
            trace_vtd_warn_ir_trigger(sid, index, trigger_mode,
                                      irq.trigger_mode);
        }
    }

    /*
     * We'd better keep the last two bits, assuming that the guest OS
     * might modify them. Keeping them does not hurt after all.
     */
    irq.msi_addr_last_bits = addr.addr.__not_care;

    /* Translate X86IOMMUIrq to MSI message */
    x86_iommu_irq_to_msi_message(&irq, translated);

    trace_vtd_ir_remap_msi(origin->address, origin->data,
                           translated->address, translated->data);
    return 0;
}

static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
                         MSIMessage *dst, uint16_t sid)
{
    return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
                                   src, dst, sid, false);
}

static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
                                   uint64_t *data, unsigned size,
                                   MemTxAttrs attrs)
{
    return MEMTX_OK;
}

static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    int ret = 0;
    MSIMessage from = {}, to = {};
    uint16_t sid = X86_IOMMU_SID_INVALID;

    from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
    from.data = (uint32_t) value;

    if (!attrs.unspecified) {
        /* We have explicit Source ID */
        sid = attrs.requester_id;
    }

    ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid, true);
    if (ret) {
        /* Drop this interrupt */
        return MEMTX_ERROR;
    }

    apic_get_class(NULL)->send_msi(&to);

    return MEMTX_OK;
}

static const MemoryRegionOps vtd_mem_ir_ops = {
    .read_with_attrs = vtd_mem_ir_read,
    .write_with_attrs = vtd_mem_ir_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void vtd_report_ir_illegal_access(VTDAddressSpace *vtd_as,
                                         hwaddr addr, bool is_write)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    uint8_t bus_n = pci_bus_num(vtd_as->bus);
    uint16_t sid = PCI_BUILD_BDF(bus_n, vtd_as->devfn);
    bool is_fpd_set = false;
    VTDContextEntry ce;

    assert(vtd_as->pasid != PCI_NO_PASID);

    /* Try our best to fetch FPD, we can't do anything more */
    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (!is_fpd_set && s->root_scalable) {
            vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, vtd_as->pasid);
        }
    }

    vtd_report_fault(s, VTD_FR_SM_INTERRUPT_ADDR,
                     is_fpd_set, sid, addr, is_write,
                     true, vtd_as->pasid);
}

static MemTxResult vtd_mem_ir_fault_read(void *opaque, hwaddr addr,
                                         uint64_t *data, unsigned size,
                                         MemTxAttrs attrs)
{
    vtd_report_ir_illegal_access(opaque, addr, false);

    return MEMTX_ERROR;
}

static MemTxResult vtd_mem_ir_fault_write(void *opaque, hwaddr addr,
                                          uint64_t value, unsigned size,
                                          MemTxAttrs attrs)
{
    vtd_report_ir_illegal_access(opaque, addr, true);

    return MEMTX_ERROR;
}

static const MemoryRegionOps vtd_mem_ir_fault_ops = {
    .read_with_attrs = vtd_mem_ir_fault_read,
    .write_with_attrs = vtd_mem_ir_fault_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus,
                                 int devfn, unsigned int pasid)
{
    /*
     * We can't simply use sid here since the bus number might not be
     * initialized by the guest.
     */
    struct vtd_as_key key = {
        .bus = bus,
        .devfn = devfn,
        .pasid = pasid,
    };
    VTDAddressSpace *vtd_dev_as;
    char name[128];

    vtd_dev_as = g_hash_table_lookup(s->vtd_address_spaces, &key);
    if (!vtd_dev_as) {
        struct vtd_as_key *new_key = g_malloc(sizeof(*new_key));

        new_key->bus = bus;
        new_key->devfn = devfn;
        new_key->pasid = pasid;

        if (pasid == PCI_NO_PASID) {
            snprintf(name, sizeof(name), "vtd-%02x.%x", PCI_SLOT(devfn),
                     PCI_FUNC(devfn));
        } else {
            snprintf(name, sizeof(name), "vtd-%02x.%x-pasid-%x", PCI_SLOT(devfn),
                     PCI_FUNC(devfn), pasid);
        }

        vtd_dev_as = g_new0(VTDAddressSpace, 1);

        vtd_dev_as->bus = bus;
        vtd_dev_as->devfn = (uint8_t)devfn;
        vtd_dev_as->pasid = pasid;
        vtd_dev_as->iommu_state = s;
        vtd_dev_as->context_cache_entry.context_cache_gen = 0;
        vtd_dev_as->iova_tree = iova_tree_new();

        memory_region_init(&vtd_dev_as->root, OBJECT(s), name, UINT64_MAX);
        address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, "vtd-root");

        /*
         * Build the DMAR-disabled container with aliases to the
         * shared MRs.  Note that aliasing to a shared memory region
         * could help the memory API to detect same FlatViews so we
         * can have devices to share the same FlatView when DMAR is
         * disabled (either by not providing "intel_iommu=on" or with
         * "iommu=pt").  It will greatly reduce the total number of
         * FlatViews of the system hence VM runs faster.
         */
        memory_region_init_alias(&vtd_dev_as->nodmar, OBJECT(s),
                                 "vtd-nodmar", &s->mr_nodmar, 0,
                                 memory_region_size(&s->mr_nodmar));

        /*
         * Build the per-device DMAR-enabled container.
         *
         * TODO: currently we have per-device IOMMU memory region only
         * because we have per-device IOMMU notifiers for devices.  If
         * one day we can abstract the IOMMU notifiers out of the
         * memory regions then we can also share the same memory
         * region here just like what we've done above with the nodmar
         * region.
         */
        strcat(name, "-dmar");
        memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu),
                                 TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s),
                                 name, UINT64_MAX);
        memory_region_init_alias(&vtd_dev_as->iommu_ir, OBJECT(s), "vtd-ir",
                                 &s->mr_ir, 0, memory_region_size(&s->mr_ir));
        memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->iommu),
                                            VTD_INTERRUPT_ADDR_FIRST,
                                            &vtd_dev_as->iommu_ir, 1);

        /*
         * This region is used for catching fault to access interrupt
         * range via passthrough + PASID. See also
         * vtd_switch_address_space(). We can't use alias since we
         * need to know the sid which is valid for MSI who uses
         * bus_master_as (see msi_send_message()).
         */
        memory_region_init_io(&vtd_dev_as->iommu_ir_fault, OBJECT(s),
                              &vtd_mem_ir_fault_ops, vtd_dev_as, "vtd-no-ir",
                              VTD_INTERRUPT_ADDR_SIZE);
        /*
         * Hook to root since when PT is enabled vtd_dev_as->iommu
         * is disabled.
         */
        memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->root),
                                            VTD_INTERRUPT_ADDR_FIRST,
                                            &vtd_dev_as->iommu_ir_fault, 2);

        /*
         * Hook both the containers under the root container, we
         * switch between DMAR & noDMAR by enable/disable
         * corresponding sub-containers
         */
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            MEMORY_REGION(&vtd_dev_as->iommu),
                                            0);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->nodmar, 0);

        vtd_switch_address_space(vtd_dev_as);

        g_hash_table_insert(s->vtd_address_spaces, new_key, vtd_dev_as);
    }
    return vtd_dev_as;
}

static bool vtd_check_hiod(IntelIOMMUState *s, HostIOMMUDevice *hiod,
                           Error **errp)
{
    HostIOMMUDeviceClass *hiodc = HOST_IOMMU_DEVICE_GET_CLASS(hiod);
    int ret;

    if (!hiodc->get_cap) {
        error_setg(errp, ".get_cap() not implemented");
        return false;
    }

    ret = hiodc->get_cap(hiod, HOST_IOMMU_DEVICE_CAP_AW_BITS, errp);
    if (ret < 0) {
        return false;
    }
    if (s->aw_bits > ret) {
        error_setg(errp, "aw-bits %d > host aw-bits %d", s->aw_bits, ret);
        return false;
    }

    return true;
}

static bool vtd_dev_set_iommu_device(PCIBus *bus, void *opaque, int devfn,
                                     HostIOMMUDevice *hiod, Error **errp)
{
    IntelIOMMUState *s = opaque;
    struct vtd_as_key key = {
        .bus = bus,
        .devfn = devfn,
    };
    struct vtd_as_key *new_key;

    vtd_iommu_lock(s);

    if (g_hash_table_lookup(s->vtd_host_iommu_dev, &key)) {
        error_setg(errp, "Host IOMMU device already exist");
        vtd_iommu_unlock(s);
        return false;
    }

    if (!vtd_check_hiod(s, hiod, errp)) {
        vtd_iommu_unlock(s);
        return false;
    }

    new_key = g_malloc(sizeof(*new_key));
    new_key->bus = bus;
    new_key->devfn = devfn;

    object_ref(hiod);
    g_hash_table_insert(s->vtd_host_iommu_dev, new_key, hiod);

    vtd_iommu_unlock(s);

    return true;
}

static void vtd_dev_unset_iommu_device(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    struct vtd_as_key key = {
        .bus = bus,
        .devfn = devfn,
    };

    vtd_iommu_lock(s);

    if (!g_hash_table_lookup(s->vtd_host_iommu_dev, &key)) {
        vtd_iommu_unlock(s);
        return;
    }

    g_hash_table_remove(s->vtd_host_iommu_dev, &key);

    vtd_iommu_unlock(s);
}

/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
    hwaddr total, remain;
    hwaddr start = n->start;
    hwaddr end = n->end;
    IntelIOMMUState *s = as->iommu_state;
    DMAMap map;

    /*
     * Note: all the code in this function assumes that IOVA
     * bits are no more than VTD_MGAW bits (which is restricted by
     * the VT-d spec), otherwise we would need to consider overflow
     * of 64 bits.
     */

    if (end > VTD_ADDRESS_SIZE(s->aw_bits) - 1) {
        /*
         * Don't need to unmap regions that are bigger than the whole
         * VT-d supported address space size
         */
        end = VTD_ADDRESS_SIZE(s->aw_bits) - 1;
    }

    assert(start <= end);
    total = remain = end - start + 1;

    while (remain >= VTD_PAGE_SIZE) {
        IOMMUTLBEvent event;
        uint64_t mask = dma_aligned_pow2_mask(start, end, s->aw_bits);
        uint64_t size = mask + 1;
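        /*
         * Each notification must describe a naturally aligned power-of-2
         * range, so the area is carved into the largest such chunks that
         * still fit between start and end.
         */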

        event.type = IOMMU_NOTIFIER_UNMAP;
        event.entry.iova = start;
        event.entry.addr_mask = mask;
        event.entry.target_as = &address_space_memory;
        event.entry.perm = IOMMU_NONE;
        /* This field is meaningless for unmap */
        event.entry.translated_addr = 0;

        memory_region_notify_iommu_one(n, &event);

        start += size;
        remain -= size;
    }

    trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                             VTD_PCI_SLOT(as->devfn),
                             VTD_PCI_FUNC(as->devfn),
                             n->start, total);

    map.iova = n->start;
    map.size = total - 1; /* Inclusive */
    iova_tree_remove(as->iova_tree, map);
}

static void vtd_address_space_unmap_all(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    IOMMUNotifier *n;

    QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
        IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
            vtd_address_space_unmap(vtd_as, n);
        }
    }
}

static void vtd_address_space_refresh_all(IntelIOMMUState *s)
{
    vtd_address_space_unmap_all(s);
    vtd_switch_address_space_all(s);
}

static int vtd_replay_hook(const IOMMUTLBEvent *event, void *private)
{
    memory_region_notify_iommu_one(private, event);
    return 0;
}

static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    uint8_t bus_n = pci_bus_num(vtd_as->bus);
    VTDContextEntry ce;
    DMAMap map = { .iova = 0, .size = HWADDR_MAX };

    /* replay is protected by BQL, page walk will re-setup it safely */
    iova_tree_remove(vtd_as->iova_tree, map);

    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
        trace_vtd_replay_ce_valid(s->root_scalable ? "scalable mode" :
                                  "legacy mode",
                                  bus_n, PCI_SLOT(vtd_as->devfn),
                                  PCI_FUNC(vtd_as->devfn),
                                  vtd_get_domain_id(s, &ce, vtd_as->pasid),
                                  ce.hi, ce.lo);
        if (n->notifier_flags & IOMMU_NOTIFIER_MAP) {
            /* This is required only for MAP typed notifiers */
            vtd_page_walk_info info = {
                .hook_fn = vtd_replay_hook,
                .private = (void *)n,
                .notify_unmap = false,
                .aw = s->aw_bits,
                .as = vtd_as,
                .domain_id = vtd_get_domain_id(s, &ce, vtd_as->pasid),
            };

            vtd_page_walk(s, &ce, 0, ~0ULL, &info, vtd_as->pasid);
        }
    } else {
        trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
                                    PCI_FUNC(vtd_as->devfn));
    }
}

static void vtd_cap_init(IntelIOMMUState *s)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND |
             VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS |
             VTD_CAP_MGAW(s->aw_bits);
    if (s->dma_drain) {
        s->cap |= VTD_CAP_DRAIN;
    }
    if (s->dma_translation) {
        if (s->aw_bits >= VTD_HOST_AW_39BIT) {
            s->cap |= VTD_CAP_SAGAW_39bit;
        }
        if (s->aw_bits >= VTD_HOST_AW_48BIT) {
            s->cap |= VTD_CAP_SAGAW_48bit;
        }
    }
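    /*
     * SAGAW_39bit and SAGAW_48bit advertise support for 3-level and
     * 4-level page tables (39-bit and 48-bit guest address widths).
     */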

    s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;

    if (x86_iommu_ir_supported(x86_iommu)) {
        s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
        if (s->intr_eim == ON_OFF_AUTO_ON) {
            s->ecap |= VTD_ECAP_EIM;
        }
        assert(s->intr_eim != ON_OFF_AUTO_AUTO);
    }

    if (x86_iommu->dt_supported) {
        s->ecap |= VTD_ECAP_DT;
    }

    if (x86_iommu->pt_supported) {
        s->ecap |= VTD_ECAP_PT;
    }

    if (s->caching_mode) {
        s->cap |= VTD_CAP_CM;
    }

    /* TODO: read cap/ecap from host to decide which cap to be exposed. */
    if (s->scalable_mode) {
        s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS;
    }

    if (s->snoop_control) {
        s->ecap |= VTD_ECAP_SC;
    }

    if (s->pasid) {
        s->ecap |= VTD_ECAP_PASID;
    }
}

/*
 * Do the initialization. It will also be called when reset, so pay
 * attention when adding new initialization stuff.
 */
static void vtd_init(IntelIOMMUState *s)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    memset(s->csr, 0, DMAR_REG_SIZE);
    memset(s->wmask, 0, DMAR_REG_SIZE);
    memset(s->w1cmask, 0, DMAR_REG_SIZE);
    memset(s->womask, 0, DMAR_REG_SIZE);

    s->root_scalable = false;
    s->dmar_enabled = false;
    s->intr_enabled = false;
    s->qi_enabled = false;
    s->iq_last_desc_type = VTD_INV_DESC_NONE;
    s->next_frcd_reg = 0;

    vtd_cap_init(s);

    /*
     * Rsvd field masks for spte
     */
    vtd_spte_rsvd[0] = ~0ULL;
    vtd_spte_rsvd[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits,
                                                  x86_iommu->dt_supported);
    vtd_spte_rsvd[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits);
    vtd_spte_rsvd[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits);
    vtd_spte_rsvd[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits);

    vtd_spte_rsvd_large[2] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits,
                                                         x86_iommu->dt_supported);
    vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits,
                                                         x86_iommu->dt_supported);

    if (s->scalable_mode || s->snoop_control) {
        vtd_spte_rsvd[1] &= ~VTD_SPTE_SNP;
        vtd_spte_rsvd_large[2] &= ~VTD_SPTE_SNP;
        vtd_spte_rsvd_large[3] &= ~VTD_SPTE_SNP;
    }

    vtd_reset_caches(s);

    /* Define registers with default values and bit semantics */
    vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
    vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
    vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
    vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
    vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
    vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffffc00ULL, 0);
    vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
    vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);

    /* Advanced Fault Logging not supported */
    vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
    vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
    vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);

    /* Treated as RsvdZ when EIM in ECAP_REG is not supported
     * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
     */
    vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);

    /* Treated as RO for implementations that PLMR and PHMR fields reported
     * as Clear in the CAP_REG.
     * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
     */
    vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);

    vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
    vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff807ULL, 0);
    vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
    vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
    vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
    vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);

    /* IOTLB registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0Xb003ffff00000000ULL, 0);
    vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
    vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);

    /* Fault Recording Registers, 128-bit */
    vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
    vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);

    /*
     * Interrupt remapping registers.
     */
    vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
}

/* Should not reset address_spaces when reset because devices will still use
 * the address space they got at first (won't ask the bus again).
 */
static void vtd_reset(DeviceState *dev)
{
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);

    vtd_init(s);
    vtd_address_space_refresh_all(s);
}

static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    VTDAddressSpace *vtd_as;

    assert(0 <= devfn && devfn < PCI_DEVFN_MAX);

    vtd_as = vtd_find_add_as(s, bus, devfn, PCI_NO_PASID);
    return &vtd_as->as;
}

static PCIIOMMUOps vtd_iommu_ops = {
    .get_address_space = vtd_host_dma_iommu,
    .set_iommu_device = vtd_dev_set_iommu_device,
    .unset_iommu_device = vtd_dev_unset_iommu_device,
};

static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu_ir_supported(x86_iommu)) {
        error_setg(errp, "eim=on cannot be selected without intremap=on");
        return false;
    }

    if (s->intr_eim == ON_OFF_AUTO_AUTO) {
        s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
                      && x86_iommu_ir_supported(x86_iommu) ?
                                              ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
        if (kvm_irqchip_is_split() && !kvm_enable_x2apic()) {
            error_setg(errp, "eim=on requires support on the KVM side "
                             "(X2APIC_API, first shipped in v4.7)");
            return false;
        }
    }

    /* Currently only address widths supported are 39 and 48 bits */
    if ((s->aw_bits != VTD_HOST_AW_39BIT) &&
        (s->aw_bits != VTD_HOST_AW_48BIT)) {
        error_setg(errp, "Supported values for aw-bits are: %d, %d",
                   VTD_HOST_AW_39BIT, VTD_HOST_AW_48BIT);
        return false;
    }

    if (s->scalable_mode && !s->dma_drain) {
        error_setg(errp, "Need to set dma_drain for scalable mode");
        return false;
    }

    if (s->pasid && !s->scalable_mode) {
        error_setg(errp, "Need to set scalable mode for PASID");
        return false;
    }

    return true;
}

static int vtd_machine_done_notify_one(Object *child, void *unused)
{
    IntelIOMMUState *iommu = INTEL_IOMMU_DEVICE(x86_iommu_get_default());

    /*
     * We hard-code vfio-pci here because it is the only special case
     * so far. Let's be more elegant in the future when we can, but so
     * far there seems to be no better way.
     */
    if (object_dynamic_cast(child, "vfio-pci") && !iommu->caching_mode) {
        vtd_panic_require_caching_mode();
    }

    return 0;
}

static void vtd_machine_done_hook(Notifier *notifier, void *unused)
{
    object_child_foreach_recursive(object_get_root(),
                                   vtd_machine_done_notify_one, NULL);
}

static Notifier vtd_machine_done_notify = {
    .notify = vtd_machine_done_hook,
};

static void vtd_realize(DeviceState *dev, Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    PCMachineState *pcms = PC_MACHINE(ms);
    X86MachineState *x86ms = X86_MACHINE(ms);
    PCIBus *bus = pcms->pcibus;
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    if (s->pasid && x86_iommu->dt_supported) {
        /*
         * PASID-based-Device-TLB Invalidate Descriptor is not
         * implemented and it requires support from vhost layer which
         * needs to be implemented in the future.
         */
        error_setg(errp, "PASID based device IOTLB is not supported");
        return;
    }

    if (!vtd_decide_config(s, errp)) {
        return;
    }

    QLIST_INIT(&s->vtd_as_with_notifiers);
    qemu_mutex_init(&s->iommu_lock);
    memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                          "intel_iommu", DMAR_REG_SIZE);
    memory_region_add_subregion(get_system_memory(),
                                Q35_HOST_BRIDGE_IOMMU_ADDR, &s->csrmem);

    /* Create the shared memory regions by all devices */
    memory_region_init(&s->mr_nodmar, OBJECT(s), "vtd-nodmar",
                       UINT64_MAX);
    memory_region_init_io(&s->mr_ir, OBJECT(s), &vtd_mem_ir_ops,
                          s, "vtd-ir", VTD_INTERRUPT_ADDR_SIZE);
    memory_region_init_alias(&s->mr_sys_alias, OBJECT(s),
                             "vtd-sys-alias", get_system_memory(), 0,
                             memory_region_size(get_system_memory()));
    memory_region_add_subregion_overlap(&s->mr_nodmar, 0,
                                        &s->mr_sys_alias, 0);
    memory_region_add_subregion_overlap(&s->mr_nodmar,
                                        VTD_INTERRUPT_ADDR_FIRST,
                                        &s->mr_ir, 1);
    /* No corresponding destroy */
    s->iotlb = g_hash_table_new_full(vtd_iotlb_hash, vtd_iotlb_equal,
                                     g_free, g_free);
    s->vtd_address_spaces = g_hash_table_new_full(vtd_as_hash, vtd_as_equal,
                                                  g_free, g_free);
    s->vtd_host_iommu_dev = g_hash_table_new_full(vtd_hiod_hash, vtd_hiod_equal,
                                                  g_free, vtd_hiod_destroy);
    vtd_init(s);
    pci_setup_iommu(bus, &vtd_iommu_ops, dev);
    /* Pseudo address space under root PCI bus. */
    x86ms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
    qemu_add_machine_init_done_notifier(&vtd_machine_done_notify);
}

static void vtd_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *x86_class = X86_IOMMU_DEVICE_CLASS(klass);

    device_class_set_legacy_reset(dc, vtd_reset);
    dc->vmsd = &vtd_vmstate;
    device_class_set_props(dc, vtd_properties);
    dc->hotpluggable = false;
    x86_class->realize = vtd_realize;
    x86_class->int_remap = vtd_int_remap;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->desc = "Intel IOMMU (VT-d) DMA Remapping device";
}

static const TypeInfo vtd_info = {
    .name          = TYPE_INTEL_IOMMU_DEVICE,
    .parent        = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(IntelIOMMUState),
    .class_init    = vtd_class_init,
};

static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
                                               void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = vtd_iommu_translate;
    imrc->notify_flag_changed = vtd_iommu_notify_flag_changed;
    imrc->replay = vtd_iommu_replay;
}

static const TypeInfo vtd_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_INTEL_IOMMU_MEMORY_REGION,
    .class_init = vtd_iommu_memory_region_class_init,
};

static void vtd_register_types(void)
{
    type_register_static(&vtd_info);
    type_register_static(&vtd_iommu_memory_region_info);
}

type_init(vtd_register_types)