/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
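
/*
 * For example, a process-context walker holds the rwsem across the whole
 * walk, while interrupt context relies on the RCU-protected list instead
 * (a usage sketch, not a fragment of this file):
 *
 *	down_read(&dmar_global_lock);
 *	for_each_drhd_unit(dmaru) {
 *		...
 *	}
 *	up_read(&dmar_global_lock);
 */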
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];
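
/*
 * Note: 64 bytes is assumed to fit the fixed dmar_pci_notify_info header
 * plus acpi_dmar_pci_path entries for typical PCI hierarchy depths;
 * deeper paths fall back to kzalloc() in dmar_alloc_pci_notify_info()
 * below.
 */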

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}
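
/*
 * The second walk above runs leaf-to-root over the same chain that was
 * counted first, so path[0] ends up as the hop nearest the root bus and
 * path[info->level - 1] as the device itself, with info->bus recording
 * the root bus number.
 */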

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;

	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus == info->path[i].bus &&
	    path[0].device == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
		    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}
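
/*
 * Note on the XOR test above: an ENDPOINT scope must describe a device
 * with a normal PCI header and a BRIDGE scope a device with a bridge
 * header, so exactly one operand being true flags a mismatch between
 * firmware and hardware.
 */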

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
};

static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#else
#define dmar_parse_one_rhsa dmar_res_noop
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/*
		 * We don't print this here because we need to sanity-check
		 * it first. So print it in dmar_parse_one_andd() instead.
		 */
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	int ret = 0;
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end && ret == 0; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			ret = -EINVAL;
			break;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			ret = -EINVAL;
		}
	}

	return ret;
}
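
/*
 * The walker treats the table as a packed sequence of variable-length
 * records; e.g. a minimal walk that only counts DRHDs might look like
 * this (a sketch, assuming the caller already mapped the table):
 *
 *	int count = 0;
 *	struct dmar_res_callback cb = {
 *		.ignore_unhandled = true,
 *		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &count,
 *		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
 *	};
 *	ret = dmar_walk_remapping_entries(start, len, &cb);
 */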
static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int ret = 0;
	int drhd_count = 0;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}

int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					    &validate_drhd_cb);
	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
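
/*
 * Example of the two-stage sizing above: if cap_max_fault_reg_offset()
 * reports e.g. 0x2000 while VTD_PAGE_SIZE is 0x1000, the first 4KiB
 * mapping is released and re-established at the aligned 8KiB size, so
 * the fault recording registers stay addressable through iommu->reg.
 */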

static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;

	if (intel_iommu_enabled)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	iommu_device_destroy(iommu->iommu_dev);

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
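
/*
 * free_head/free_tail delimit the in-flight window of the QI_LENGTH ring:
 * submissions advance free_head, and the reclaim above walks free_tail
 * forward over completed (QI_DONE/QI_ABORT) slots, growing free_cnt back.
 */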

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
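
/*
 * Note on the ITE walk above: descriptors are queued in pairs (a work
 * descriptor followed by its wait descriptor), so stepping head back by
 * two visits each pending wait slot between the current head and tail.
 */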

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
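
/*
 * Each qi_submit_sync() call therefore consumes two ring slots: the
 * caller's descriptor at free_head and a wait descriptor right after it,
 * whose status write-back is what the busy-wait loop polls for; the
 * "free_cnt < 3" check keeps at least one slot spare across the pair.
 */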

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
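
/*
 * Worked example for the mask encoding above: with mask == 2 the BUG_ON
 * enforces 4-page (16KiB) alignment, and the OR sets the low address bits
 * so that the position of the lowest zero bit encodes a 4-page
 * invalidation range rather than a single page.
 */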

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most stuff is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);

	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}
enable_drhd_fault_handling(void)
1681 struct dmar_drhd_unit
*drhd
;
1682 struct intel_iommu
*iommu
;
1685 * Enable fault control interrupt.
1687 for_each_iommu(iommu
, drhd
) {
1689 int ret
= dmar_set_interrupt(iommu
);
1692 pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
1693 (unsigned long long)drhd
->reg_base_addr
, ret
);
1698 * Clear any previous faults.
1700 dmar_fault(iommu
->irq
, iommu
);
1701 fault_status
= readl(iommu
->reg
+ DMAR_FSTS_REG
);
1702 writel(fault_status
, iommu
->reg
+ DMAR_FSTS_REG
);

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);

/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static u8 dmar_hp_uuid[] = {
	/* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
	/* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
};

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3

static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
}
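
/*
 * acpi_check_dsm() takes a bitmask of functions to probe, so "1 << func"
 * asks whether the firmware's _DSM advertises that single function for
 * dmar_hp_uuid at DMAR_DSM_REV_ID.
 */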

static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}

static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}

static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}

static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}

static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}

static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}

int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}