drivers/iommu/dmar.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
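
/*
 * Illustrative sketch (not part of the original file): per the locking
 * rules above, writers take dmar_global_lock while readers in atomic
 * context walk dmar_drhd_units under RCU. The helper name is
 * hypothetical; nothing in this driver calls it.
 */
static bool __maybe_unused dmar_example_has_include_all(void)
{
	struct dmar_drhd_unit *drhd;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
		if (drhd->include_all) {
			found = true;
			break;
		}
	rcu_read_unlock();

	return found;
}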
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}
void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}
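
/*
 * Illustrative sketch (not part of the original file): the scope
 * iterators behave like an indexed walk over the devices[] array, with
 * the "active" variant skipping unset slots. A hypothetical helper
 * counting the populated slots, called with dmar_global_lock or RCU
 * held as the rules above require:
 */
static int __maybe_unused dmar_example_count_active(struct dmar_dev_scope *devices,
						    int cnt)
{
	int i, active = 0;
	struct device *tmp;

	for_each_active_dev_scope(devices, cnt, i, tmp)
		active++;

	return active;
}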
/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}
static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}
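
/*
 * Worked example (illustrative, not part of the original file): for a
 * function 0000:03:00.1 behind a root-port bridge at 00:1c.0, the loops
 * above record the path root-first:
 *
 *	info->bus     = 0;
 *	info->path[0] = { .device = 0x1c, .function = 0 };	// root port
 *	info->path[1] = { .device = 0x00, .function = 1 };	// the device
 *
 * which matches the order used by ACPI DMAR device-scope entries, so
 * dmar_match_pci_path() below can compare the two arrays element-wise.
 */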
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		return false;
	if (info->level != count)
		return false;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			return false;
	}

	return true;
}
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
		    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			rcu_assign_pointer(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
					    ((void *)drhd) + drhd->header.length,
					    dmaru->segment,
					    dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}
static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
					  dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}
static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

	return 0;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/*
	 * Check for NUL termination within the designated length; the 8
	 * bytes subtracted are the fixed fields of struct acpi_dmar_andd
	 * that precede device_name.
	 */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have used the
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * SINIT saved in SinitMleData in the TXT heap (which is DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ROOT_ATS:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_NAMESPACE:
			ret = dmar_parse_one_andd(entry_header);
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}
static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			acpi_bus_get_device(h, &adev);
			if (!adev) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}
int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Failed to parse DMAR table\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}
static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}
int __init detect_intel_iommu(void)
{
	int ret;

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();

	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;

	if (intel_iommu_enabled)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       iommu->name);

	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}
static void free_iommu(struct intel_iommu *iommu)
{
	iommu_device_destroy(iommu->iommu_dev);

	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		dmar_free_hwirq(iommu->irq);
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors that have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
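
/*
 * Illustrative sketch (not part of the original file): the invalidation
 * queue is a fixed ring of QI_LENGTH slots. free_head is where the next
 * descriptor pair is written, free_tail trails it as completed slots are
 * reclaimed above, and free_cnt counts the free slots in between. A
 * hypothetical helper stating the relationship:
 */
static inline int __maybe_unused qi_example_slots_in_flight(struct q_inval *qi)
{
	/* slots between free_tail and free_head are submitted or done */
	return (qi->free_head - qi->free_tail + QI_LENGTH) % QI_LENGTH;
}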
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
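
/*
 * Worked example (illustrative, not part of the original file):
 * size_order is the number of 4KB pages expressed as a power of two, so
 * flushing a 2MB range at a 2MB-aligned address would look roughly like:
 *
 *	qi_flush_iotlb(iommu, did, addr, 9, DMA_TLB_PSI_FLUSH);
 *
 * since 2MB = 2^9 * 4KB. A size_order of 0 invalidates a single page.
 */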
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
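
/*
 * Worked example (illustrative, not part of the original file): mask
 * asks the device to invalidate 1 << mask pages. With VTD_PAGE_SHIFT =
 * 12 and mask = 2, the BUG_ON above enforces 16KB alignment and the
 * following line ORs in (1 << 13) - 1, i.e. sets address bits 12:0; the
 * device decodes the size of the invalidated range from the position of
 * the lowest zero address bit, per the VT-d size-encoded address format.
 */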
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
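
/*
 * Worked example (illustrative, not part of the original file): DMA-remap
 * fault reasons are numbered from 0, interrupt-remap reasons from 0x20:
 *
 *	dmar_get_fault_reason(0x05, &type)
 *		-> "PTE Write access is not set", type = DMA_REMAP
 *	dmar_get_fault_reason(0x21, &type)
 *		-> "Interrupt index exceeded the interrupt-remapping table
 *		    size", type = INTR_REMAP
 */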
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
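
/*
 * Illustrative sketch (hypothetical struct, not from the spec headers):
 * each primary fault record is 16 bytes, and dmar_fault() below reads
 * it in the three pieces sketched here:
 */
struct dmar_example_fault_record {
	u64 fault_addr;		/* bytes 0-7: faulting page address */
	u32 source_id_dword;	/* bytes 8-11: source-id in the low 16 bits */
	u32 status_dword;	/* bytes 12-15: DMA_FRCD_F, type, fault reason */
};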
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq();
	if (irq <= 0) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		dmar_free_hwirq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;	/* BIT0: INTR_REMAP supported */
}
static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* DMAR units are in use */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);