/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/dmi.h>	/* dmi_get_system_info(), used by the DRHD sanity check below */

#define PREFIX "DMAR:"
/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
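/*
 * Note on the walk above: each device scope entry carries an array of
 * acpi_dmar_pci_path hops (device/function pairs) immediately after the
 * scope header.  The loop follows that path one bridge at a time via
 * pdev->subordinate, so the final pdev is the endpoint or bridge the
 * scope entry actually names.
 */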
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
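/*
 * dmar_parse_dev_scope() is a two-pass parser: the first walk over
 * [start, end) only counts endpoint/bridge entries so the pci_dev pointer
 * array can be sized with kcalloc(), and the second walk fills that array
 * via dmar_parse_one_dev_scope().
 */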
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}

	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}
static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}
static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk(KERN_INFO PREFIX
		       "DRHD (flags: 0x%08x) base: 0x%016Lx\n",
		       drhd->flags, (unsigned long long)drhd->address);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;
		printk(KERN_INFO PREFIX
		       "RMRR base: 0x%016Lx end: 0x%016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		default:
			printk(KERN_WARNING PREFIX
			       "Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
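/*
 * Layout assumed above: the ACPI DMAR table is a fixed acpi_table_dmar
 * header followed by a sequence of variable-length remapping structures
 * (DRHD, RMRR, ...), each starting with an acpi_dmar_header that gives
 * its type and length; the parser simply advances by entry_header->length
 * until it reaches dmar_tbl->length.
 */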
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
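/*
 * Matching relies on the registration order set up in
 * dmar_register_drhd_unit(): units with an explicit device scope sit at
 * the front of dmar_drhd_units, while an INCLUDE_ALL unit is appended at
 * the tail, so it only matches a device that no earlier unit claimed.
 */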
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}
	}

	return ret;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * for now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}
	iommu->agaw = agaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		 (unsigned long long)drhd->reg_base_addr,
		 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		 (unsigned long long)iommu->cap,
		 (unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	kfree(iommu);
	return -1;
}
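/*
 * alloc_iommu() maps the register set in two steps: a single VTD_PAGE_SIZE
 * mapping is enough to read the capability and extended-capability
 * registers, and only if those report IOTLB or fault-recording registers
 * beyond the first page is the region remapped at its full size.
 */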
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
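/*
 * The invalidation queue is a ring of QI_LENGTH descriptors tracked by
 * free_head (next free slot), free_tail (oldest in-flight slot) and
 * free_cnt; reclaim_free_desc() advances free_tail over completed
 * (QI_DONE) entries so their slots can be reused.
 */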
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> 4) == index) {
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	return 0;
}
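/*
 * IQE recovery sketch: when the head register points at the descriptor we
 * just submitted, that descriptor is assumed to be the malformed one, so
 * it is overwritten with the following wait descriptor and the IQE bit is
 * cleared, letting the hardware refetch from the same slot and continue.
 */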
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc = 0;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			goto out;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}
out:
	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	return rc;
}
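/*
 * Each submission consumes two slots: the caller's descriptor at 'index'
 * and a wait descriptor at 'wait_index' whose completion writes QI_DONE
 * into qi->desc_status[wait_index]; polling that status word is how the
 * function knows the hardware has executed both descriptors.  The tail
 * register is written as free_head << 4 because each descriptor is
 * 16 bytes long.
 */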
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		     u64 type, int non_present_entry_flush)
{
	struct qi_desc desc;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}
int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		   unsigned int size_order, u64 type,
		   int non_present_entry_flush)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	return qi_submit_sync(&desc, iommu);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;

	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 cmd, sts;
	unsigned long flags;
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	spin_lock_irqsave(&iommu->register_lock, flags);
	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	cmd = iommu->gcmd | DMA_GCMD_QIE;
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	return 0;
}
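/*
 * Enable order used above: allocate the descriptor page and status array,
 * zero the tail register, program the queue base into DMAR_IQA_REG, then
 * set DMA_GCMD_QIE and spin (IOMMU_WAIT_OP) until the status register
 * reports DMA_GSTS_QIES, i.e. the hardware has actually switched to
 * queued invalidation.
 */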
/* iommu interrupt handling. Most stuff are MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
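/*
 * Fault reason numbering: reasons below ARRAY_SIZE(dma_remap_fault_reasons)
 * index the DMA-remapping table directly, while interrupt-remapping
 * reasons start at 0x20, which is why 0x20 is subtracted before indexing
 * intr_remap_fault_reasons.
 */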
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
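/*
 * Primary fault handling model: each fault recording register is
 * PRIMARY_FAULT_REG_LEN (16) bytes; the top dword carries the F (valid)
 * bit, fault reason and type, the next dword the source-id, and the low
 * qword the faulting page address.  Writing the F bit back clears that
 * record, and writing the accumulated status back to DMAR_FSTS_REG clears
 * the remaining summary bits.
 */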
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}