/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/dmi.h>

#define PREFIX "DMAR:"
/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time, and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL entries at the tail, so a scan of the list
	 * will find them last.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);
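	/*
	 * The device scope entry is followed by a variable-length array of
	 * acpi_dmar_pci_path entries (device/function numbers); count is the
	 * number of hops to walk from scope->bus down to the target device.
	 */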
	while (count) {
		if (!bus) {
			/*
			 * Some BIOSes list non-existent devices in the DMAR
			 * table; just ignore them.
			 */
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;
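	/*
	 * Two passes over the scope entries: the first counts the
	 * ENDPOINT/BRIDGE entries so the pci_dev pointer array can be sized,
	 * the second fills the array via dmar_parse_one_dev_scope().
	 */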
	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}

	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
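/*
 * Device-scope parsing for the DRHD/RMRR structures recorded above is
 * deferred: the structures are collected while the DMAR table is parsed at
 * boot, and the referenced PCI devices are resolved later from
 * dmar_dev_scope_init(), once the PCI subsystem has enumerated the buses.
 */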
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}
static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}
static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk(KERN_INFO PREFIX
		       "DRHD (flags: 0x%08x) base: 0x%016Lx\n",
		       drhd->flags, (unsigned long long)drhd->address);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;
		printk(KERN_INFO PREFIX
		       "RMRR base: 0x%016Lx end: 0x%016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been done
	 * with a fixed map.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
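	/*
	 * The DMAR table body is a sequence of variable-length remapping
	 * structures (DRHD, RMRR, ...); each begins with an acpi_dmar_header
	 * whose length field is used to step to the next entry.
	 */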
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
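/*
 * A device belongs to a DRHD unit either because the unit's device scope
 * lists the device (or a bridge above it), or because the unit is the
 * INCLUDE_ALL unit for the device's PCI segment.
 */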
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}
	}

	return ret;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
	}
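	/*
	 * The early mapping of the DMAR table is dropped below; it is
	 * re-acquired later by parse_dmar_table() via dmar_table_detect().
	 */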
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
}
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
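	/*
	 * cap/ecap report the offsets of the fault recording and IOTLB
	 * invalidation registers; if they lie beyond the first page, the
	 * initial one-page mapping is replaced with a larger one below.
	 */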
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		 (unsigned long long)drhd->reg_base_addr,
		 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		 (unsigned long long)iommu->cap,
		 (unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

error:
	kfree(iommu);
	return -1;
}
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);
	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> 4) == index) {
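			/*
			 * The descriptor at the head is the one we just
			 * queued and it is what faulted; overwrite it with
			 * the (well-formed) wait descriptor so the queue can
			 * make progress once the IQE bit is cleared below.
			 */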
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc = 0;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
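	/*
	 * Each submission consumes two queue slots: one for the descriptor
	 * itself and one for the wait descriptor queued behind it, so spin
	 * until enough slots are free.
	 */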
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
[wait_index
] = wait_desc
;
666 __iommu_flush_cache(iommu
, &hw
[index
], sizeof(struct qi_desc
));
667 __iommu_flush_cache(iommu
, &hw
[wait_index
], sizeof(struct qi_desc
));
669 qi
->free_head
= (qi
->free_head
+ 2) % QI_LENGTH
;
673 * update the HW tail register indicating the presence of
676 writel(qi
->free_head
<< 4, iommu
->reg
+ DMAR_IQT_REG
);
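	/*
	 * The shift by 4 converts a descriptor index into the byte-offset
	 * form used by the IQT/IQH registers (each qi_desc is 16 bytes);
	 * qi_check_fault() decodes the head register with the matching >> 4.
	 */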
	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	return rc;
}
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		     u64 type, int non_present_entry_flush)
{
	struct qi_desc desc;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}
int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		   unsigned int size_order, u64 type,
		   int non_present_entry_flush)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	return qi_submit_sync(&desc, iommu);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 cmd, sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
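	/*
	 * DMAR_IQA_REG (programmed above) holds the physical address of the
	 * invalidation queue page allocated in dmar_enable_qi(); hardware
	 * fetches descriptors from there once DMA_GCMD_QIE is set below.
	 */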
	cmd = iommu->gcmd | DMA_GCMD_QIE;
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is required to support
 * interrupt-remapping. It is also used by DMA-remapping, where it replaces
 * register-based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
->desc_status
= kmalloc(QI_LENGTH
* sizeof(int), GFP_ATOMIC
);
863 if (!qi
->desc_status
) {
864 free_page((unsigned long) qi
->desc
);
870 qi
->free_head
= qi
->free_tail
= 0;
871 qi
->free_cnt
= QI_LENGTH
;
873 spin_lock_init(&qi
->q_lock
);
875 __dmar_enable_qi(iommu
);
/* iommu interrupt handling. Most of it is MSI-like. */

static const char *dma_remap_fault_reasons[] =
{
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)
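/*
 * Hardware fault reason codes below 0x20 index dma_remap_fault_reasons[];
 * codes from 0x20 upwards index intr_remap_fault_reasons[].
 */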
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);
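	/*
	 * For interrupt-remapping faults the hardware reports the faulting
	 * interrupt index in the upper bits of the recorded address, which
	 * is why the INTR-REMAP message below prints addr >> 48.
	 */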
	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)

irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index > cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}