/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

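/*
 * Count the translatable device-scope entries between @start and @end
 * (endpoints, bridges and ACPI namespace devices) and allocate a
 * dmar_dev_scope array sized to hold them.
 */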
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

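/*
 * Build a notification record for a PCI device event.  For device
 * addition the full bridge path from the root bus down to the device is
 * captured so it can later be matched against device-scope entries; the
 * static buffer above is used whenever the path fits into it.
 */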
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info "
				"for %s.\n", pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

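/*
 * Compare the PCI path recorded in @info with a device-scope path from
 * the DMAR table.  A fallback is applied for firmware whose RMRR entries
 * reference a device by its final bus number only.
 */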
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus              == info->path[i].bus &&
	    path[0].device   == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
		    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}

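/* Return: 1 if the device was found and removed from @devices, 0 otherwise */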
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

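/*
 * PCI bus notifier: keeps the per-DRHD device-scope lists in sync as
 * devices come and go.  Runs under dmar_global_lock.
 */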
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};

static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		goto out;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return ret;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

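/*
 * Walk the remapping structures in [start, start + len) and dispatch each
 * entry to the matching handler in @cb, guarding against zero-length and
 * truncated records in malformed ACPI tables.
 */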
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	int ret = 0;
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end && ret == 0; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			ret = -EINVAL;
			break;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			ret = -EINVAL;
		}
	}

	return ret;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int ret = 0;
	int drhd_count = 0;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

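/*
 * Bind an ACPI namespace device reported through an ANDD structure to the
 * DRHD unit whose device scope lists its enumeration ID.
 */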
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}

int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					    &validate_drhd_cb);
	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

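/* Allocate a free sequence id and the matching "dmarN" name for an IOMMU. */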
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;

	if (intel_iommu_enabled)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	iommu_device_destroy(iommu->iommu_dev);

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

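/*
 * Check for faults raised while a queued-invalidation descriptor was being
 * processed: IQE (invalid descriptor), ITE (device-TLB invalidation
 * timeout) and ICE (invalidation completion error).
 */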
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context to queue another cmd while a cmd is already submitted
		 * and waiting for completion on this cpu. This is to avoid
		 * a deadlock where the interrupt context can wait indefinitely
		 * for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most stuff is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

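/* Allocate and wire up the fault-reporting interrupt for an IOMMU. */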
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);

/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static u8 dmar_hp_uuid[] = {
	/* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
	/* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
};

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3

static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
}

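/*
 * Evaluate the DMAR _DSM function @func on @handle and walk the remapping
 * structures in the returned buffer, dispatching each entry of the matching
 * type to @handler.  Typical use, as in the hotplug paths below:
 *
 *	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
 *				     &dmar_hp_add_drhd, NULL);
 */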
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}

static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}

static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}

static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}

static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}

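/*
 * Common entry point for DMAR unit hot-add and hot-removal: locate the
 * handle carrying the DMAR _DSM method, then insert or remove remapping
 * hardware units under dmar_global_lock.
 */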
static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}

int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}