/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 *    (see the illustrative sketch below the declarations that follow)
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
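
/*
 * Illustrative sketch only (not part of the upstream driver): how a reader
 * in atomic context could walk dmar_drhd_units under RCU, per the locking
 * rules documented above.  The helper name dmar_example_has_include_all()
 * is hypothetical and exists purely for illustration.
 */
static inline bool dmar_example_has_include_all(void)
{
        struct dmar_drhd_unit *drhd;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) {
                if (drhd->include_all) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}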

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * add INCLUDE_ALL at the tail, so scan the list will find it at
         * the very end.
         */
        if (drhd->include_all)
                list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
        else
                list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
        struct acpi_dmar_device_scope *scope;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
                        pr_warn("Unsupported device scope\n");
                }
                start += scope->length;
        }
        if (*cnt == 0)
                return NULL;

        return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
        int i;
        struct device *tmp_dev;

        if (*devices && *cnt) {
                for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
                        put_device(tmp_dev);
                kfree(*devices);
        }

        *devices = NULL;
        *cnt = 0;
}
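
/*
 * Illustrative sketch only: pairing dmar_alloc_dev_scope() with
 * dmar_free_dev_scope() for the scope entries that follow a DRHD header,
 * much as dmar_parse_one_drhd() does further down in this file.  The
 * helper name dmar_example_scope_roundtrip() is hypothetical.
 */
static inline int dmar_example_scope_roundtrip(struct acpi_dmar_hardware_unit *drhd)
{
        struct dmar_dev_scope *devices;
        int cnt;

        devices = dmar_alloc_dev_scope((void *)(drhd + 1),
                                       ((void *)drhd) + drhd->header.length,
                                       &cnt);
        if (cnt && !devices)
                return -ENOMEM;

        dmar_free_dev_scope(&devices, &cnt);
        return 0;
}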

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
        int level = 0;
        size_t size;
        struct pci_dev *tmp;
        struct dmar_pci_notify_info *info;

        BUG_ON(dev->is_virtfn);

        /* Only generate path[] for device addition event */
        if (event == BUS_NOTIFY_ADD_DEVICE)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
                        level++;

        size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
        if (size <= sizeof(dmar_pci_notify_info_buf)) {
                info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
        } else {
                info = kzalloc(size, GFP_KERNEL);
                if (!info) {
                        pr_warn("Out of memory when allocating notify_info "
                                "for %s.\n", pci_name(dev));
                        if (dmar_dev_scope_status == 0)
                                dmar_dev_scope_status = -ENOMEM;
                        return NULL;
                }
        }

        info->event = event;
        info->dev = dev;
        info->seg = pci_domain_nr(dev->bus);
        info->level = level;
        if (event == BUS_NOTIFY_ADD_DEVICE) {
                for (tmp = dev; tmp; tmp = tmp->bus->self) {
                        level--;
                        info->path[level].device = PCI_SLOT(tmp->devfn);
                        info->path[level].function = PCI_FUNC(tmp->devfn);
                        if (pci_is_root_bus(tmp->bus))
                                info->bus = tmp->bus->number;
                }
        }

        return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
        if ((void *)info != dmar_pci_notify_info_buf)
                kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
                                struct acpi_dmar_pci_path *path, int count)
{
        int i;

        if (info->bus != bus)
                return false;
        if (info->level != count)
                return false;

        for (i = 0; i < count; i++) {
                if (path[i].device != info->path[i].device ||
                    path[i].function != info->path[i].function)
                        return false;
        }

        return true;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
                          void *start, void *end, u16 segment,
                          struct dmar_dev_scope *devices,
                          int devices_cnt)
{
        int i, level;
        struct device *tmp, *dev = &info->dev->dev;
        struct acpi_dmar_device_scope *scope;
        struct acpi_dmar_pci_path *path;

        if (segment != info->seg)
                return 0;

        for (; start < end; start += scope->length) {
                scope = start;
                if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        continue;

                path = (struct acpi_dmar_pci_path *)(scope + 1);
                level = (scope->length - sizeof(*scope)) / sizeof(*path);
                if (!dmar_match_pci_path(info, scope->bus, path, level))
                        continue;

                if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
                    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
                        pr_warn("Device scope type does not match for %s\n",
                                pci_name(info->dev));
                        return -EINVAL;
                }

                for_each_dev_scope(devices, devices_cnt, i, tmp)
                        if (tmp == NULL) {
                                devices[i].bus = info->dev->bus->number;
                                devices[i].devfn = info->dev->devfn;
                                rcu_assign_pointer(devices[i].dev,
                                                   get_device(dev));
                                return 1;
                        }
                BUG_ON(i >= devices_cnt);
        }

        return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
                          struct dmar_dev_scope *devices, int count)
{
        int index;
        struct device *tmp;

        if (info->seg != segment)
                return 0;

        for_each_active_dev_scope(devices, count, index, tmp)
                if (tmp == &info->dev->dev) {
                        rcu_assign_pointer(devices[index].dev, NULL);
                        synchronize_rcu();
                        put_device(tmp);
                        return 1;
                }

        return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
        int ret = 0;
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        for_each_drhd_unit(dmaru) {
                if (dmaru->include_all)
                        continue;

                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit, header);
                ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
                                ((void *)drhd) + drhd->header.length,
                                dmaru->segment,
                                dmaru->devices, dmaru->devices_cnt);
                if (ret != 0)
                        break;
        }
        if (ret >= 0)
                ret = dmar_iommu_notify_scope_dev(info);
        if (ret < 0 && dmar_dev_scope_status == 0)
                dmar_dev_scope_status = ret;

        return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
        struct dmar_drhd_unit *dmaru;

        for_each_drhd_unit(dmaru)
                if (dmar_remove_dev_scope(info, dmaru->segment,
                        dmaru->devices, dmaru->devices_cnt))
                        break;
        dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct pci_dev *pdev = to_pci_dev(data);
        struct dmar_pci_notify_info *info;

        /* Only care about add/remove events for physical functions */
        if (pdev->is_virtfn)
                return NOTIFY_DONE;
        if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
                return NOTIFY_DONE;

        info = dmar_alloc_pci_notify_info(pdev, action);
        if (!info)
                return NOTIFY_DONE;

        down_write(&dmar_global_lock);
        if (action == BUS_NOTIFY_ADD_DEVICE)
                dmar_pci_bus_add_dev(info);
        else if (action == BUS_NOTIFY_DEL_DEVICE)
                dmar_pci_bus_del_dev(info);
        up_write(&dmar_global_lock);

        dmar_free_pci_notify_info(info);

        return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
        .notifier_call = dmar_pci_bus_notifier,
};

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
        dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
                                              ((void *)drhd) + drhd->header.length,
                                              &dmaru->devices_cnt);
        if (dmaru->devices_cnt && dmaru->devices == NULL) {
                kfree(dmaru);
                return -ENOMEM;
        }

        ret = alloc_iommu(dmaru);
        if (ret) {
                dmar_free_dev_scope(&dmaru->devices,
                                    &dmaru->devices_cnt);
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);

        return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
        if (dmaru->devices && dmaru->devices_cnt)
                dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
        if (dmaru->iommu)
                free_iommu(dmaru->iommu);
        kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_andd *andd = (void *)header;

        /* Check for NUL termination within the designated length */
        if (strnlen(andd->object_name, header->length - 8) == header->length - 8) {
                WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
                           "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
                return -EINVAL;
        }
        pr_info("ANDD device: %x name: %s\n", andd->device_number,
                andd->object_name);

        return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        WARN_TAINT(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));

        return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                pr_info("DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                pr_info("RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                pr_info("ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
                        (unsigned long long)rhsa->base_address,
                        rhsa->proximity_domain);
                break;
        case ACPI_DMAR_TYPE_ANDD:
                /* We don't print this here because we need to sanity-check
                   it first. So print it in dmar_parse_one_andd() instead. */
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* if we could find DMAR table, then there are DMAR devices */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;
        int drhd_count = 0;

        /*
         * Do it again, earlier dmar_tbl mapping could be mapped with
         * fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use DMAR copy
         * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                pr_warn("Invalid DMAR haw\n");
                return -EINVAL;
        }

        pr_info("Host address width %d\n", dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        drhd_count++;
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                        ret = dmar_parse_one_rmrr(entry_header);
                        break;
                case ACPI_DMAR_TYPE_ATSR:
                        ret = dmar_parse_one_atsr(entry_header);
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
                        ret = dmar_parse_one_rhsa(entry_header);
#endif
                        break;
                case ACPI_DMAR_TYPE_ANDD:
                        ret = dmar_parse_one_andd(entry_header);
                        break;
                default:
                        pr_warn("Unknown DMAR structure type %d\n",
                                entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        if (drhd_count == 0)
                pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
        return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
                                 int cnt, struct pci_dev *dev)
{
        int index;
        struct device *tmp;

        while (dev) {
                for_each_active_dev_scope(devices, cnt, index, tmp)
                        if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        dev = pci_physfn(dev);

        rcu_read_lock();
        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        goto out;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        goto out;
        }
        dmaru = NULL;
out:
        rcu_read_unlock();

        return dmaru;
}

static void __init dmar_acpi_insert_dev_scope(u8 device_number,
                                              struct acpi_device *adev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        struct device *tmp;
        int i;
        struct acpi_dmar_pci_path *path;

        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                for (scope = (void *)(drhd + 1);
                     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
                     scope = ((void *)scope) + scope->length) {
                        if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI)
                                continue;
                        if (scope->enumeration_id != device_number)
                                continue;

                        path = (void *)(scope + 1);
                        pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
                                dev_name(&adev->dev), dmaru->reg_base_addr,
                                scope->bus, path->device, path->function);
                        for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
                                if (tmp == NULL) {
                                        dmaru->devices[i].bus = scope->bus;
                                        dmaru->devices[i].devfn = PCI_DEVFN(path->device,
                                                                            path->function);
                                        rcu_assign_pointer(dmaru->devices[i].dev,
                                                           get_device(&adev->dev));
                                        return;
                                }
                        BUG_ON(i >= dmaru->devices_cnt);
                }
        }
        pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
                device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
        struct acpi_dmar_andd *andd;

        if (dmar_tbl == NULL)
                return -ENODEV;

        for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
             ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
             andd = ((void *)andd) + andd->header.length) {
                if (andd->header.type == ACPI_DMAR_TYPE_ANDD) {
                        acpi_handle h;
                        struct acpi_device *adev;

                        if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
                                                          andd->object_name,
                                                          &h))) {
                                pr_err("Failed to find handle for ACPI object %s\n",
                                       andd->object_name);
                                continue;
                        }
                        if (acpi_bus_get_device(h, &adev)) {
                                pr_err("Failed to get device for ACPI object %s\n",
                                       andd->object_name);
                                continue;
                        }
                        dmar_acpi_insert_dev_scope(andd->device_number, adev);
                }
        }
        return 0;
}

int __init dmar_dev_scope_init(void)
{
        struct pci_dev *dev = NULL;
        struct dmar_pci_notify_info *info;

        if (dmar_dev_scope_status != 1)
                return dmar_dev_scope_status;

        if (list_empty(&dmar_drhd_units)) {
                dmar_dev_scope_status = -ENODEV;
        } else {
                dmar_dev_scope_status = 0;

                dmar_acpi_dev_scope_init();

                for_each_pci_dev(dev) {
                        if (dev->is_virtfn)
                                continue;

                        info = dmar_alloc_pci_notify_info(dev,
                                        BUS_NOTIFY_ADD_DEVICE);
                        if (!info) {
                                return dmar_dev_scope_status;
                        } else {
                                dmar_pci_bus_add_dev(info);
                                dmar_free_pci_notify_info(info);
                        }
                }

                bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
        }

        return dmar_dev_scope_status;
}

int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized == 0) {
                ret = parse_dmar_table();
                if (ret < 0) {
                        if (ret != -ENODEV)
                                pr_info("parse DMAR table failure.\n");
                } else if (list_empty(&dmar_drhd_units)) {
                        pr_info("No DMAR devices found\n");
                        ret = -ENODEV;
                }

                if (ret < 0)
                        dmar_table_initialized = ret;
                else
                        dmar_table_initialized = 1;
        }

        return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
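
/*
 * Illustrative sketch only: the call sequence a VT-d consumer typically
 * follows at boot using the entry points above -- dmar_table_init() to
 * parse the DMAR table, then dmar_dev_scope_init() to bind devices to
 * their DRHD units, both under dmar_global_lock.  The helper name
 * dmar_example_bringup() is hypothetical and not called anywhere.
 */
static inline int dmar_example_bringup(void)
{
        int ret;

        down_write(&dmar_global_lock);
        ret = dmar_table_init();
        if (ret == 0)
                ret = dmar_dev_scope_init();
        up_write(&dmar_global_lock);

        return ret;
}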

static void warn_invalid_dmar(u64 addr, const char *message)
{
        WARN_TAINT_ONCE(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        struct acpi_dmar_hardware_unit *drhd;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        entry_header = (struct acpi_dmar_header *)(dmar + 1);

        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        return 0;
                }

                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
                        void __iomem *addr;
                        u64 cap, ecap;

                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                warn_invalid_dmar(0, "");
                                goto failed;
                        }

                        addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
                        if (!addr) {
                                printk("IOMMU: can't validate: %llx\n", drhd->address);
                                goto failed;
                        }
                        cap = dmar_readq(addr + DMAR_CAP_REG);
                        ecap = dmar_readq(addr + DMAR_ECAP_REG);
                        early_iounmap(addr, VTD_PAGE_SIZE);
                        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                                warn_invalid_dmar(drhd->address,
                                                  " returns all ones");
                                goto failed;
                        }
                }

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return 1;

failed:
        return 0;
}

int __init detect_intel_iommu(void)
{
        int ret;

        down_write(&dmar_global_lock);
        ret = dmar_table_detect();
        if (ret)
                ret = check_zero_address();

        if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }

#ifdef CONFIG_X86
        if (ret)
                x86_init.iommu.iommu_init = intel_iommu_init;
#endif

        early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
        up_write(&dmar_global_lock);

        return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
        iounmap(iommu->reg);
        release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
        int map_size, err = 0;

        iommu->reg_phys = phys_addr;
        iommu->reg_size = VTD_PAGE_SIZE;

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
                pr_err("IOMMU: can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                pr_err("IOMMU: can't map the region\n");
                err = -ENOMEM;
                goto release;
        }

        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                err = -EINVAL;
                warn_invalid_dmar(phys_addr, " returns all ones");
                goto unmap;
        }

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                iounmap(iommu->reg);
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
                        pr_err("IOMMU: can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
                        pr_err("IOMMU: can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
        }
        err = 0;
        goto out;

unmap:
        iounmap(iommu->reg);
release:
        release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
        return err;
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        u32 ver, sts;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;
        int err;

        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
                pr_err("IOMMU: failed to map %s\n", iommu->name);
                goto error;
        }

        err = -EINVAL;
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;
        iommu->segment = drhd->segment;

        iommu->node = -1;

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                iommu->seq_id,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        /* Reflect status in gcmd */
        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (sts & DMA_GSTS_IRES)
                iommu->gcmd |= DMA_GCMD_IRE;
        if (sts & DMA_GSTS_TES)
                iommu->gcmd |= DMA_GCMD_TE;
        if (sts & DMA_GSTS_QIES)
                iommu->gcmd |= DMA_GCMD_QIE;

        raw_spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;

err_unmap:
        unmap_iommu(iommu);
error:
        kfree(iommu);
        return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
        if (iommu->irq) {
                free_irq(iommu->irq, iommu);
                irq_set_handler_data(iommu->irq, NULL);
                dmar_free_hwirq(iommu->irq);
        }

        if (iommu->qi) {
                free_page((unsigned long)iommu->qi->desc);
                kfree(iommu->qi->desc_status);
                kfree(iommu->qi);
        }

        if (iommu->reg)
                unmap_iommu(iommu);

        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors that have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        pr_err("VT-d detected invalid descriptor: "
                               "low=%llx, high=%llx\n",
                               (unsigned long long)qi->desc[index].low,
                               (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                               sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                            sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        raw_spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                raw_spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We will leave the interrupts disabled, to prevent interrupt
                 * context from queueing another cmd while a cmd is already
                 * submitted and waiting for completion on this CPU. This is
                 * to avoid a deadlock where the interrupt context can wait
                 * indefinitely for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                raw_spin_unlock(&qi->q_lock);
                cpu_relax();
                raw_spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        raw_spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;

        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DEV_IOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give a chance to HW to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
               (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * queued invalidation is already setup and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}
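
/*
 * Illustrative sketch only: the queued-invalidation lifecycle as seen by a
 * caller of the routines above -- enable the interface, submit a descriptor
 * (here a global interrupt-entry-cache flush via qi_global_iec()), then
 * disable it again.  The helper name dmar_example_qi_roundtrip() is
 * hypothetical and exists purely for illustration.
 */
static inline int dmar_example_qi_roundtrip(struct intel_iommu *iommu)
{
        int ret;

        ret = dmar_enable_qi(iommu);
        if (ret)
                return ret;

        /* queues an IEC descriptor and spins until its wait descriptor completes */
        qi_global_iec(iommu);

        dmar_disable_qi(iommu);
        return 0;
}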

/* iommu interrupt handling. Most stuff is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
        "PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};
, int *fault_type
)
1390 if (fault_reason
>= 0x20 && (fault_reason
- 0x20 <
1391 ARRAY_SIZE(irq_remap_fault_reasons
))) {
1392 *fault_type
= INTR_REMAP
;
1393 return irq_remap_fault_reasons
[fault_reason
- 0x20];
1394 } else if (fault_reason
< ARRAY_SIZE(dma_remap_fault_reasons
)) {
1395 *fault_type
= DMA_REMAP
;
1396 return dma_remap_fault_reasons
[fault_reason
];
1398 *fault_type
= UNKNOWN
;
1403 void dmar_msi_unmask(struct irq_data
*data
)
1405 struct intel_iommu
*iommu
= irq_data_get_irq_handler_data(data
);
1409 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
1410 writel(0, iommu
->reg
+ DMAR_FECTL_REG
);
1411 /* Read a reg to force flush the post write */
1412 readl(iommu
->reg
+ DMAR_FECTL_REG
);
1413 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);

void dmar_msi_mask(struct irq_data *data)
{
        unsigned long flag;
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

        /* mask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, unsigned long long addr)
{
        const char *reason;
        int fault_type;

        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
                       "fault index %llx\n"
                       "INTR-REMAP:[fault reason %02d] %s\n",
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr >> 48,
                       fault_reason, reason);
        else
                pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx\n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
                pr_err("DRHD: handling fault status reg %x\n", fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto unlock_exit;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);

                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                  source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                raw_spin_lock_irqsave(&iommu->register_lock, flag);
        }

        writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = dmar_alloc_hwirq();
        if (irq <= 0) {
                pr_err("IOMMU: no free vectors\n");
                return -EINVAL;
        }

        irq_set_handler_data(irq, iommu);
        iommu->irq = irq;

        ret = arch_setup_dmar_msi(irq);
        if (ret) {
                irq_set_handler_data(irq, NULL);
                iommu->irq = 0;
                dmar_free_hwirq(irq);
                return ret;
        }

        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
                pr_err("IOMMU: can't request irq\n");
        return ret;
}

int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /*
         * Enable fault control interrupt.
         */
        for_each_iommu(iommu, drhd) {
                u32 fault_status;
                int ret = dmar_set_interrupt(iommu);

                if (ret) {
                        pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(iommu->irq, iommu);
                fault_status = readl(iommu->reg + DMAR_FSTS_REG);
                writel(fault_status, iommu->reg + DMAR_FSTS_REG);
        }

        return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there is no pending
         * invalidation requests now, it's safe to re-enable queued
         * invalidation.
         */
        __dmar_enable_qi(iommu);

        return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
        struct acpi_table_dmar *dmar;
        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return 0;
        return dmar->flags & 0x1;
}

static int __init dmar_free_unused_resources(void)
{
        struct dmar_drhd_unit *dmaru, *dmaru_n;

        /* DMAR units are in use */
        if (irq_remapping_enabled || intel_iommu_enabled)
                return 0;

        if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
                bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

        down_write(&dmar_global_lock);
        list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
                list_del(&dmaru->list);
                dmar_free_drhd(dmaru);
        }
        up_write(&dmar_global_lock);

        return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);