/* drivers/iommu/dmar.c */
1 /*
2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
22 * This file implements early detection/parsing of Remapping Devices
23 * reported to the OS by the BIOS via the DMA remapping reporting (DMAR)
24 * ACPI tables.
26 * These routines are used by both DMA-remapping and Interrupt-remapping
29 #define pr_fmt(fmt) "DMAR: " fmt
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/iova.h>
34 #include <linux/intel-iommu.h>
35 #include <linux/timer.h>
36 #include <linux/irq.h>
37 #include <linux/interrupt.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/slab.h>
41 #include <linux/iommu.h>
42 #include <linux/numa.h>
43 #include <asm/irq_remapping.h>
44 #include <asm/iommu_table.h>
46 #include "irq_remapping.h"
48 typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
49 struct dmar_res_callback {
50 dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
51 void *arg[ACPI_DMAR_TYPE_RESERVED];
52 bool ignore_unhandled;
53 bool print_entry;
57 * Assumptions:
58 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
59 * before the IO devices managed by that unit.
60 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
61 * after the IO devices managed by that unit.
62 * 3) Hotplug events are rare.
64 * Locking rules for DMA and interrupt remapping related global data structures:
65 * 1) Use dmar_global_lock in process context
66 * 2) Use RCU in interrupt context
68 DECLARE_RWSEM(dmar_global_lock);
69 LIST_HEAD(dmar_drhd_units);
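/*
 * Illustrative sketch of the locking rules above (not part of the
 * driver): process context takes dmar_global_lock, while atomic or
 * interrupt context walks the RCU-protected list instead.
 *
 *	down_read(&dmar_global_lock);
 *	for_each_drhd_unit(dmaru)
 *		...
 *	up_read(&dmar_global_lock);
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
 *		...
 *	rcu_read_unlock();
 */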
71 struct acpi_table_header * __initdata dmar_tbl;
72 static int dmar_dev_scope_status = 1;
73 static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
75 static int alloc_iommu(struct dmar_drhd_unit *drhd);
76 static void free_iommu(struct intel_iommu *iommu);
78 extern const struct iommu_ops intel_iommu_ops;
80 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
83 * add INCLUDE_ALL at the tail, so a scan of the list will find it at
84 * the very end.
86 if (drhd->include_all)
87 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
88 else
89 list_add_rcu(&drhd->list, &dmar_drhd_units);
92 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
94 struct acpi_dmar_device_scope *scope;
96 *cnt = 0;
97 while (start < end) {
98 scope = start;
99 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
100 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
101 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
102 (*cnt)++;
103 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
104 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
105 pr_warn("Unsupported device scope\n");
107 start += scope->length;
109 if (*cnt == 0)
110 return NULL;
112 return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
115 void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
117 int i;
118 struct device *tmp_dev;
120 if (*devices && *cnt) {
121 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
122 put_device(tmp_dev);
123 kfree(*devices);
126 *devices = NULL;
127 *cnt = 0;
130 /* Optimize out kzalloc()/kfree() for normal cases */
131 static char dmar_pci_notify_info_buf[64];
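/*
 * Usage sketch (mirrors the real callers later in this file): pair
 * dmar_alloc_pci_notify_info() with dmar_free_pci_notify_info(), which
 * only kfree()s when the static buffer above was not used:
 *
 *	info = dmar_alloc_pci_notify_info(pdev, BUS_NOTIFY_ADD_DEVICE);
 *	if (info) {
 *		dmar_pci_bus_add_dev(info);
 *		dmar_free_pci_notify_info(info);
 *	}
 */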
133 static struct dmar_pci_notify_info *
134 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
136 int level = 0;
137 size_t size;
138 struct pci_dev *tmp;
139 struct dmar_pci_notify_info *info;
141 BUG_ON(dev->is_virtfn);
143 /* Only generate path[] for device addition event */
144 if (event == BUS_NOTIFY_ADD_DEVICE)
145 for (tmp = dev; tmp; tmp = tmp->bus->self)
146 level++;
148 size = struct_size(info, path, level);
149 if (size <= sizeof(dmar_pci_notify_info_buf)) {
150 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
151 } else {
152 info = kzalloc(size, GFP_KERNEL);
153 if (!info) {
154 pr_warn("Out of memory when allocating notify_info for %s.\n",
155 pci_name(dev));
156 if (dmar_dev_scope_status == 0)
157 dmar_dev_scope_status = -ENOMEM;
158 return NULL;
162 info->event = event;
163 info->dev = dev;
164 info->seg = pci_domain_nr(dev->bus);
165 info->level = level;
166 if (event == BUS_NOTIFY_ADD_DEVICE) {
167 for (tmp = dev; tmp; tmp = tmp->bus->self) {
168 level--;
169 info->path[level].bus = tmp->bus->number;
170 info->path[level].device = PCI_SLOT(tmp->devfn);
171 info->path[level].function = PCI_FUNC(tmp->devfn);
172 if (pci_is_root_bus(tmp->bus))
173 info->bus = tmp->bus->number;
177 return info;
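/*
 * Worked example (hypothetical addresses): for a device 0000:02:03.1
 * behind a root-port bridge at 00:1c.0, the loop above fills path[]
 * root-first, mirroring the device-scope path in the DMAR table:
 *
 *	info->bus     = 0x00;
 *	info->path[0] = { .bus = 0x00, .device = 0x1c, .function = 0 };
 *	info->path[1] = { .bus = 0x02, .device = 0x03, .function = 1 };
 */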
180 static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
182 if ((void *)info != dmar_pci_notify_info_buf)
183 kfree(info);
186 static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
187 struct acpi_dmar_pci_path *path, int count)
189 int i;
191 if (info->bus != bus)
192 goto fallback;
193 if (info->level != count)
194 goto fallback;
196 for (i = 0; i < count; i++) {
197 if (path[i].device != info->path[i].device ||
198 path[i].function != info->path[i].function)
199 goto fallback;
202 return true;
204 fallback:
206 if (count != 1)
207 return false;
209 i = info->level - 1;
210 if (bus == info->path[i].bus &&
211 path[0].device == info->path[i].device &&
212 path[0].function == info->path[i].function) {
213 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
214 bus, path[0].device, path[0].function);
215 return true;
218 return false;
221 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
222 int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
223 void *start, void *end, u16 segment,
224 struct dmar_dev_scope *devices,
225 int devices_cnt)
227 int i, level;
228 struct device *tmp, *dev = &info->dev->dev;
229 struct acpi_dmar_device_scope *scope;
230 struct acpi_dmar_pci_path *path;
232 if (segment != info->seg)
233 return 0;
235 for (; start < end; start += scope->length) {
236 scope = start;
237 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
238 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
239 continue;
241 path = (struct acpi_dmar_pci_path *)(scope + 1);
242 level = (scope->length - sizeof(*scope)) / sizeof(*path);
243 if (!dmar_match_pci_path(info, scope->bus, path, level))
244 continue;
247 * We expect devices with endpoint scope to have normal PCI
248 * headers, and devices with bridge scope to have bridge PCI
249 * headers. However, PCI NTB devices may be listed in the
250 * DMAR table with bridge scope, even though they have a
251 * normal PCI header. NTB devices are identified by class
252 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
253 * for this special case.
255 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
256 info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
257 (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
258 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
259 info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
260 pr_warn("Device scope type does not match for %s\n",
261 pci_name(info->dev));
262 return -EINVAL;
265 for_each_dev_scope(devices, devices_cnt, i, tmp)
266 if (tmp == NULL) {
267 devices[i].bus = info->dev->bus->number;
268 devices[i].devfn = info->dev->devfn;
269 rcu_assign_pointer(devices[i].dev,
270 get_device(dev));
271 return 1;
273 BUG_ON(i >= devices_cnt);
276 return 0;
279 int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
280 struct dmar_dev_scope *devices, int count)
282 int index;
283 struct device *tmp;
285 if (info->seg != segment)
286 return 0;
288 for_each_active_dev_scope(devices, count, index, tmp)
289 if (tmp == &info->dev->dev) {
290 RCU_INIT_POINTER(devices[index].dev, NULL);
291 synchronize_rcu();
292 put_device(tmp);
293 return 1;
296 return 0;
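/*
 * Reader-side sketch (illustrative): the device pointers cleared above
 * are dereferenced under RCU elsewhere, e.g.:
 *
 *	rcu_read_lock();
 *	for_each_active_dev_scope(devices, cnt, i, tmp)
 *		...use tmp...
 *	rcu_read_unlock();
 *
 * so the synchronize_rcu() above guarantees that no reader still holds
 * the pointer when put_device() drops the reference.
 */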
299 static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
301 int ret = 0;
302 struct dmar_drhd_unit *dmaru;
303 struct acpi_dmar_hardware_unit *drhd;
305 for_each_drhd_unit(dmaru) {
306 if (dmaru->include_all)
307 continue;
309 drhd = container_of(dmaru->hdr,
310 struct acpi_dmar_hardware_unit, header);
311 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
312 ((void *)drhd) + drhd->header.length,
313 dmaru->segment,
314 dmaru->devices, dmaru->devices_cnt);
315 if (ret)
316 break;
318 if (ret >= 0)
319 ret = dmar_iommu_notify_scope_dev(info);
320 if (ret < 0 && dmar_dev_scope_status == 0)
321 dmar_dev_scope_status = ret;
323 return ret;
326 static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
328 struct dmar_drhd_unit *dmaru;
330 for_each_drhd_unit(dmaru)
331 if (dmar_remove_dev_scope(info, dmaru->segment,
332 dmaru->devices, dmaru->devices_cnt))
333 break;
334 dmar_iommu_notify_scope_dev(info);
337 static int dmar_pci_bus_notifier(struct notifier_block *nb,
338 unsigned long action, void *data)
340 struct pci_dev *pdev = to_pci_dev(data);
341 struct dmar_pci_notify_info *info;
343 /* Only care about add/remove events for physical functions.
344 * For VFs we actually do the lookup based on the corresponding
345 * PF in device_to_iommu() anyway. */
346 if (pdev->is_virtfn)
347 return NOTIFY_DONE;
348 if (action != BUS_NOTIFY_ADD_DEVICE &&
349 action != BUS_NOTIFY_REMOVED_DEVICE)
350 return NOTIFY_DONE;
352 info = dmar_alloc_pci_notify_info(pdev, action);
353 if (!info)
354 return NOTIFY_DONE;
356 down_write(&dmar_global_lock);
357 if (action == BUS_NOTIFY_ADD_DEVICE)
358 dmar_pci_bus_add_dev(info);
359 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
360 dmar_pci_bus_del_dev(info);
361 up_write(&dmar_global_lock);
363 dmar_free_pci_notify_info(info);
365 return NOTIFY_OK;
368 static struct notifier_block dmar_pci_bus_nb = {
369 .notifier_call = dmar_pci_bus_notifier,
370 .priority = INT_MIN,
373 static struct dmar_drhd_unit *
374 dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
376 struct dmar_drhd_unit *dmaru;
378 list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
379 if (dmaru->segment == drhd->segment &&
380 dmaru->reg_base_addr == drhd->address)
381 return dmaru;
383 return NULL;
387 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
388 * structure which uniquely represents one DMA remapping hardware unit
389 * present in the platform
391 static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
393 struct acpi_dmar_hardware_unit *drhd;
394 struct dmar_drhd_unit *dmaru;
395 int ret;
397 drhd = (struct acpi_dmar_hardware_unit *)header;
398 dmaru = dmar_find_dmaru(drhd);
399 if (dmaru)
400 goto out;
402 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
403 if (!dmaru)
404 return -ENOMEM;
407 * If the header was allocated from the slab by the ACPI _DSM method, we
408 * need to copy the content because the memory buffer will be freed on return.
410 dmaru->hdr = (void *)(dmaru + 1);
411 memcpy(dmaru->hdr, header, header->length);
412 dmaru->reg_base_addr = drhd->address;
413 dmaru->segment = drhd->segment;
414 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
415 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
416 ((void *)drhd) + drhd->header.length,
417 &dmaru->devices_cnt);
418 if (dmaru->devices_cnt && dmaru->devices == NULL) {
419 kfree(dmaru);
420 return -ENOMEM;
423 ret = alloc_iommu(dmaru);
424 if (ret) {
425 dmar_free_dev_scope(&dmaru->devices,
426 &dmaru->devices_cnt);
427 kfree(dmaru);
428 return ret;
430 dmar_register_drhd_unit(dmaru);
432 out:
433 if (arg)
434 (*(int *)arg)++;
436 return 0;
439 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
441 if (dmaru->devices && dmaru->devices_cnt)
442 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
443 if (dmaru->iommu)
444 free_iommu(dmaru->iommu);
445 kfree(dmaru);
448 static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
449 void *arg)
451 struct acpi_dmar_andd *andd = (void *)header;
453 /* Check for NUL termination within the designated length */
454 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
455 WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
456 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
457 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
458 dmi_get_system_info(DMI_BIOS_VENDOR),
459 dmi_get_system_info(DMI_BIOS_VERSION),
460 dmi_get_system_info(DMI_PRODUCT_VERSION));
461 return -EINVAL;
463 pr_info("ANDD device: %x name: %s\n", andd->device_number,
464 andd->device_name);
466 return 0;
469 #ifdef CONFIG_ACPI_NUMA
470 static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
472 struct acpi_dmar_rhsa *rhsa;
473 struct dmar_drhd_unit *drhd;
475 rhsa = (struct acpi_dmar_rhsa *)header;
476 for_each_drhd_unit(drhd) {
477 if (drhd->reg_base_addr == rhsa->base_address) {
478 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
480 if (!node_online(node))
481 node = NUMA_NO_NODE;
482 drhd->iommu->node = node;
483 return 0;
486 WARN_TAINT(
487 1, TAINT_FIRMWARE_WORKAROUND,
488 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
489 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
490 drhd->reg_base_addr,
491 dmi_get_system_info(DMI_BIOS_VENDOR),
492 dmi_get_system_info(DMI_BIOS_VERSION),
493 dmi_get_system_info(DMI_PRODUCT_VERSION));
495 return 0;
497 #else
498 #define dmar_parse_one_rhsa dmar_res_noop
499 #endif
501 static void
502 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
504 struct acpi_dmar_hardware_unit *drhd;
505 struct acpi_dmar_reserved_memory *rmrr;
506 struct acpi_dmar_atsr *atsr;
507 struct acpi_dmar_rhsa *rhsa;
509 switch (header->type) {
510 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
511 drhd = container_of(header, struct acpi_dmar_hardware_unit,
512 header);
513 pr_info("DRHD base: %#016Lx flags: %#x\n",
514 (unsigned long long)drhd->address, drhd->flags);
515 break;
516 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
517 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
518 header);
519 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
520 (unsigned long long)rmrr->base_address,
521 (unsigned long long)rmrr->end_address);
522 break;
523 case ACPI_DMAR_TYPE_ROOT_ATS:
524 atsr = container_of(header, struct acpi_dmar_atsr, header);
525 pr_info("ATSR flags: %#x\n", atsr->flags);
526 break;
527 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
528 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
529 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
530 (unsigned long long)rhsa->base_address,
531 rhsa->proximity_domain);
532 break;
533 case ACPI_DMAR_TYPE_NAMESPACE:
534 /* We don't print this here because we need to sanity-check
535 it first. So print it in dmar_parse_one_andd() instead. */
536 break;
541 * dmar_table_detect - checks to see if the platform supports DMAR devices
543 static int __init dmar_table_detect(void)
545 acpi_status status = AE_OK;
547 /* if we can find the DMAR table, then there are DMAR devices */
548 status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
550 if (ACPI_SUCCESS(status) && !dmar_tbl) {
551 pr_warn("Unable to map DMAR\n");
552 status = AE_NOT_FOUND;
555 return ACPI_SUCCESS(status) ? 0 : -ENOENT;
558 static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
559 size_t len, struct dmar_res_callback *cb)
561 struct acpi_dmar_header *iter, *next;
562 struct acpi_dmar_header *end = ((void *)start) + len;
564 for (iter = start; iter < end; iter = next) {
565 next = (void *)iter + iter->length;
566 if (iter->length == 0) {
567 /* Avoid looping forever on bad ACPI tables */
568 pr_debug(FW_BUG "Invalid 0-length structure\n");
569 break;
570 } else if (next > end) {
571 /* Avoid running past the table end */
572 pr_warn(FW_BUG "Record passes table end\n");
573 return -EINVAL;
576 if (cb->print_entry)
577 dmar_table_print_dmar_entry(iter);
579 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
580 /* continue for forward compatibility */
581 pr_debug("Unknown DMAR structure type %d\n",
582 iter->type);
583 } else if (cb->cb[iter->type]) {
584 int ret;
586 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
587 if (ret)
588 return ret;
589 } else if (!cb->ignore_unhandled) {
590 pr_warn("No handler for DMAR structure type %d\n",
591 iter->type);
592 return -EINVAL;
596 return 0;
599 static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
600 struct dmar_res_callback *cb)
602 return dmar_walk_remapping_entries((void *)(dmar + 1),
603 dmar->header.length - sizeof(*dmar), cb);
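/*
 * Usage sketch (illustrative, with a hypothetical callback): walk only
 * the DRHD entries of a table, ignoring everything else:
 *
 *	static int count_drhd(struct acpi_dmar_header *h, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	struct dmar_res_callback cb = {
 *		.ignore_unhandled = true,
 *		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = count_drhd,
 *		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &n,
 *	};
 *	dmar_walk_dmar_table(dmar, &cb);
 *
 * parse_dmar_table() below is the full-featured real user.
 */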
607 * parse_dmar_table - parses the DMA remapping reporting (DMAR) table
609 static int __init
610 parse_dmar_table(void)
612 struct acpi_table_dmar *dmar;
613 int drhd_count = 0;
614 int ret;
615 struct dmar_res_callback cb = {
616 .print_entry = true,
617 .ignore_unhandled = true,
618 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
619 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
620 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
621 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
622 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
623 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
627 * Detect again: the earlier dmar_tbl mapping could have been done
628 * with the fixed map.
630 dmar_table_detect();
633 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
634 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
636 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
638 dmar = (struct acpi_table_dmar *)dmar_tbl;
639 if (!dmar)
640 return -ENODEV;
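	/*
	 * Note (editorial): dmar->width holds the host address width (HAW)
	 * minus one, e.g. width = 38 encodes a 39-bit HAW; anything below
	 * PAGE_SHIFT - 1 could not even address a single page and must be
	 * firmware breakage.
	 */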
642 if (dmar->width < PAGE_SHIFT - 1) {
643 pr_warn("Invalid DMAR haw\n");
644 return -EINVAL;
647 pr_info("Host address width %d\n", dmar->width + 1);
648 ret = dmar_walk_dmar_table(dmar, &cb);
649 if (ret == 0 && drhd_count == 0)
650 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
652 return ret;
655 static int dmar_pci_device_match(struct dmar_dev_scope devices[],
656 int cnt, struct pci_dev *dev)
658 int index;
659 struct device *tmp;
661 while (dev) {
662 for_each_active_dev_scope(devices, cnt, index, tmp)
663 if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
664 return 1;
666 /* Check our parent */
667 dev = dev->bus->self;
670 return 0;
673 struct dmar_drhd_unit *
674 dmar_find_matched_drhd_unit(struct pci_dev *dev)
676 struct dmar_drhd_unit *dmaru;
677 struct acpi_dmar_hardware_unit *drhd;
679 dev = pci_physfn(dev);
681 rcu_read_lock();
682 for_each_drhd_unit(dmaru) {
683 drhd = container_of(dmaru->hdr,
684 struct acpi_dmar_hardware_unit,
685 header);
687 if (dmaru->include_all &&
688 drhd->segment == pci_domain_nr(dev->bus))
689 goto out;
691 if (dmar_pci_device_match(dmaru->devices,
692 dmaru->devices_cnt, dev))
693 goto out;
695 dmaru = NULL;
696 out:
697 rcu_read_unlock();
699 return dmaru;
702 static void __init dmar_acpi_insert_dev_scope(u8 device_number,
703 struct acpi_device *adev)
705 struct dmar_drhd_unit *dmaru;
706 struct acpi_dmar_hardware_unit *drhd;
707 struct acpi_dmar_device_scope *scope;
708 struct device *tmp;
709 int i;
710 struct acpi_dmar_pci_path *path;
712 for_each_drhd_unit(dmaru) {
713 drhd = container_of(dmaru->hdr,
714 struct acpi_dmar_hardware_unit,
715 header);
717 for (scope = (void *)(drhd + 1);
718 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
719 scope = ((void *)scope) + scope->length) {
720 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
721 continue;
722 if (scope->enumeration_id != device_number)
723 continue;
725 path = (void *)(scope + 1);
726 pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
727 dev_name(&adev->dev), dmaru->reg_base_addr,
728 scope->bus, path->device, path->function);
729 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
730 if (tmp == NULL) {
731 dmaru->devices[i].bus = scope->bus;
732 dmaru->devices[i].devfn = PCI_DEVFN(path->device,
733 path->function);
734 rcu_assign_pointer(dmaru->devices[i].dev,
735 get_device(&adev->dev));
736 return;
738 BUG_ON(i >= dmaru->devices_cnt);
741 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
742 device_number, dev_name(&adev->dev));
745 static int __init dmar_acpi_dev_scope_init(void)
747 struct acpi_dmar_andd *andd;
749 if (dmar_tbl == NULL)
750 return -ENODEV;
752 for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
753 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
754 andd = ((void *)andd) + andd->header.length) {
755 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
756 acpi_handle h;
757 struct acpi_device *adev;
759 if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
760 andd->device_name,
761 &h))) {
762 pr_err("Failed to find handle for ACPI object %s\n",
763 andd->device_name);
764 continue;
766 if (acpi_bus_get_device(h, &adev)) {
767 pr_err("Failed to get device for ACPI object %s\n",
768 andd->device_name);
769 continue;
771 dmar_acpi_insert_dev_scope(andd->device_number, adev);
774 return 0;
777 int __init dmar_dev_scope_init(void)
779 struct pci_dev *dev = NULL;
780 struct dmar_pci_notify_info *info;
782 if (dmar_dev_scope_status != 1)
783 return dmar_dev_scope_status;
785 if (list_empty(&dmar_drhd_units)) {
786 dmar_dev_scope_status = -ENODEV;
787 } else {
788 dmar_dev_scope_status = 0;
790 dmar_acpi_dev_scope_init();
792 for_each_pci_dev(dev) {
793 if (dev->is_virtfn)
794 continue;
796 info = dmar_alloc_pci_notify_info(dev,
797 BUS_NOTIFY_ADD_DEVICE);
798 if (!info) {
799 return dmar_dev_scope_status;
800 } else {
801 dmar_pci_bus_add_dev(info);
802 dmar_free_pci_notify_info(info);
807 return dmar_dev_scope_status;
810 void __init dmar_register_bus_notifier(void)
812 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
816 int __init dmar_table_init(void)
818 static int dmar_table_initialized;
819 int ret;
821 if (dmar_table_initialized == 0) {
822 ret = parse_dmar_table();
823 if (ret < 0) {
824 if (ret != -ENODEV)
825 pr_info("Parse DMAR table failure.\n");
826 } else if (list_empty(&dmar_drhd_units)) {
827 pr_info("No DMAR devices found\n");
828 ret = -ENODEV;
831 if (ret < 0)
832 dmar_table_initialized = ret;
833 else
834 dmar_table_initialized = 1;
837 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
840 static void warn_invalid_dmar(u64 addr, const char *message)
842 WARN_TAINT_ONCE(
843 1, TAINT_FIRMWARE_WORKAROUND,
844 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
845 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
846 addr, message,
847 dmi_get_system_info(DMI_BIOS_VENDOR),
848 dmi_get_system_info(DMI_BIOS_VERSION),
849 dmi_get_system_info(DMI_PRODUCT_VERSION));
852 static int __ref
853 dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
855 struct acpi_dmar_hardware_unit *drhd;
856 void __iomem *addr;
857 u64 cap, ecap;
859 drhd = (void *)entry;
860 if (!drhd->address) {
861 warn_invalid_dmar(0, "");
862 return -EINVAL;
865 if (arg)
866 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
867 else
868 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
869 if (!addr) {
870 pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
871 return -EINVAL;
874 cap = dmar_readq(addr + DMAR_CAP_REG);
875 ecap = dmar_readq(addr + DMAR_ECAP_REG);
877 if (arg)
878 iounmap(addr);
879 else
880 early_iounmap(addr, VTD_PAGE_SIZE);
882 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
883 warn_invalid_dmar(drhd->address, " returns all ones");
884 return -EINVAL;
887 return 0;
890 int __init detect_intel_iommu(void)
892 int ret;
893 struct dmar_res_callback validate_drhd_cb = {
894 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
895 .ignore_unhandled = true,
898 down_write(&dmar_global_lock);
899 ret = dmar_table_detect();
900 if (!ret)
901 ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
902 &validate_drhd_cb);
903 if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
904 iommu_detected = 1;
905 /* Make sure ACS will be enabled */
906 pci_request_acs();
909 #ifdef CONFIG_X86
910 if (!ret)
911 x86_init.iommu.iommu_init = intel_iommu_init;
912 #endif
914 if (dmar_tbl) {
915 acpi_put_table(dmar_tbl);
916 dmar_tbl = NULL;
918 up_write(&dmar_global_lock);
920 return ret ? ret : 1;
923 static void unmap_iommu(struct intel_iommu *iommu)
925 iounmap(iommu->reg);
926 release_mem_region(iommu->reg_phys, iommu->reg_size);
930 * map_iommu: map the iommu's registers
931 * @iommu: the iommu to map
932 * @phys_addr: the physical address of the base register
934 * Memory map the iommu's registers. Start with a single page, and
935 * possibly expand if that turns out to be insufficient.
937 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
939 int map_size, err = 0;
941 iommu->reg_phys = phys_addr;
942 iommu->reg_size = VTD_PAGE_SIZE;
944 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
945 pr_err("Can't reserve memory\n");
946 err = -EBUSY;
947 goto out;
950 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
951 if (!iommu->reg) {
952 pr_err("Can't map the region\n");
953 err = -ENOMEM;
954 goto release;
957 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
958 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
960 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
961 err = -EINVAL;
962 warn_invalid_dmar(phys_addr, " returns all ones");
963 goto unmap;
966 /* the registers may span more than one page */
967 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
968 cap_max_fault_reg_offset(iommu->cap));
969 map_size = VTD_PAGE_ALIGN(map_size);
970 if (map_size > iommu->reg_size) {
971 iounmap(iommu->reg);
972 release_mem_region(iommu->reg_phys, iommu->reg_size);
973 iommu->reg_size = map_size;
974 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
975 iommu->name)) {
976 pr_err("Can't reserve memory\n");
977 err = -EBUSY;
978 goto out;
980 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
981 if (!iommu->reg) {
982 pr_err("Can't map the region\n");
983 err = -ENOMEM;
984 goto release;
987 err = 0;
988 goto out;
990 unmap:
991 iounmap(iommu->reg);
992 release:
993 release_mem_region(iommu->reg_phys, iommu->reg_size);
994 out:
995 return err;
998 static int dmar_alloc_seq_id(struct intel_iommu *iommu)
1000 iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
1001 DMAR_UNITS_SUPPORTED);
1002 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
1003 iommu->seq_id = -1;
1004 } else {
1005 set_bit(iommu->seq_id, dmar_seq_ids);
1006 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1009 return iommu->seq_id;
1012 static void dmar_free_seq_id(struct intel_iommu *iommu)
1014 if (iommu->seq_id >= 0) {
1015 clear_bit(iommu->seq_id, dmar_seq_ids);
1016 iommu->seq_id = -1;
1020 static int alloc_iommu(struct dmar_drhd_unit *drhd)
1022 struct intel_iommu *iommu;
1023 u32 ver, sts;
1024 int agaw = 0;
1025 int msagaw = 0;
1026 int err;
1028 if (!drhd->reg_base_addr) {
1029 warn_invalid_dmar(0, "");
1030 return -EINVAL;
1033 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1034 if (!iommu)
1035 return -ENOMEM;
1037 if (dmar_alloc_seq_id(iommu) < 0) {
1038 pr_err("Failed to allocate seq_id\n");
1039 err = -ENOSPC;
1040 goto error;
1043 err = map_iommu(iommu, drhd->reg_base_addr);
1044 if (err) {
1045 pr_err("Failed to map %s\n", iommu->name);
1046 goto error_free_seq_id;
1049 err = -EINVAL;
1050 agaw = iommu_calculate_agaw(iommu);
1051 if (agaw < 0) {
1052 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1053 iommu->seq_id);
1054 goto err_unmap;
1056 msagaw = iommu_calculate_max_sagaw(iommu);
1057 if (msagaw < 0) {
1058 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1059 iommu->seq_id);
1060 goto err_unmap;
1062 iommu->agaw = agaw;
1063 iommu->msagaw = msagaw;
1064 iommu->segment = drhd->segment;
1066 iommu->node = NUMA_NO_NODE;
1068 ver = readl(iommu->reg + DMAR_VER_REG);
1069 pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1070 iommu->name,
1071 (unsigned long long)drhd->reg_base_addr,
1072 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1073 (unsigned long long)iommu->cap,
1074 (unsigned long long)iommu->ecap);
1076 /* Reflect status in gcmd */
1077 sts = readl(iommu->reg + DMAR_GSTS_REG);
1078 if (sts & DMA_GSTS_IRES)
1079 iommu->gcmd |= DMA_GCMD_IRE;
1080 if (sts & DMA_GSTS_TES)
1081 iommu->gcmd |= DMA_GCMD_TE;
1082 if (sts & DMA_GSTS_QIES)
1083 iommu->gcmd |= DMA_GCMD_QIE;
1085 raw_spin_lock_init(&iommu->register_lock);
1087 if (intel_iommu_enabled) {
1088 err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1089 intel_iommu_groups,
1090 "%s", iommu->name);
1091 if (err)
1092 goto err_unmap;
1094 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
1096 err = iommu_device_register(&iommu->iommu);
1097 if (err)
1098 goto err_unmap;
1101 drhd->iommu = iommu;
1103 return 0;
1105 err_unmap:
1106 unmap_iommu(iommu);
1107 error_free_seq_id:
1108 dmar_free_seq_id(iommu);
1109 error:
1110 kfree(iommu);
1111 return err;
1114 static void free_iommu(struct intel_iommu *iommu)
1116 if (intel_iommu_enabled) {
1117 iommu_device_unregister(&iommu->iommu);
1118 iommu_device_sysfs_remove(&iommu->iommu);
1121 if (iommu->irq) {
1122 if (iommu->pr_irq) {
1123 free_irq(iommu->pr_irq, iommu);
1124 dmar_free_hwirq(iommu->pr_irq);
1125 iommu->pr_irq = 0;
1127 free_irq(iommu->irq, iommu);
1128 dmar_free_hwirq(iommu->irq);
1129 iommu->irq = 0;
1132 if (iommu->qi) {
1133 free_page((unsigned long)iommu->qi->desc);
1134 kfree(iommu->qi->desc_status);
1135 kfree(iommu->qi);
1138 if (iommu->reg)
1139 unmap_iommu(iommu);
1141 dmar_free_seq_id(iommu);
1142 kfree(iommu);
1146 * Reclaim all the submitted descriptors which have completed their work.
1148 static inline void reclaim_free_desc(struct q_inval *qi)
1150 while (qi->desc_status[qi->free_tail] == QI_DONE ||
1151 qi->desc_status[qi->free_tail] == QI_ABORT) {
1152 qi->desc_status[qi->free_tail] = QI_FREE;
1153 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1154 qi->free_cnt++;
1158 static int qi_check_fault(struct intel_iommu *iommu, int index)
1160 u32 fault;
1161 int head, tail;
1162 struct q_inval *qi = iommu->qi;
1163 int wait_index = (index + 1) % QI_LENGTH;
1164 int shift = qi_shift(iommu);
1166 if (qi->desc_status[wait_index] == QI_ABORT)
1167 return -EAGAIN;
1169 fault = readl(iommu->reg + DMAR_FSTS_REG);
1172 * If IQE happens, the head points to the descriptor associated
1173 * with the error. No new descriptors are fetched until the IQE
1174 * is cleared.
1176 if (fault & DMA_FSTS_IQE) {
1177 head = readl(iommu->reg + DMAR_IQH_REG);
1178 if ((head >> shift) == index) {
1179 struct qi_desc *desc = qi->desc + head;
1182 * desc->qw2 and desc->qw3 are either reserved or
1183 * used by software as private data. We won't print
1184 * out these two qw's for security considerations.
1186 pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
1187 (unsigned long long)desc->qw0,
1188 (unsigned long long)desc->qw1);
1189 memcpy(desc, qi->desc + (wait_index << shift),
1190 1 << shift);
1191 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1192 return -EINVAL;
1197 * If ITE happens, all pending wait_desc commands are aborted.
1198 * No new descriptors are fetched until the ITE is cleared.
1200 if (fault & DMA_FSTS_ITE) {
1201 head = readl(iommu->reg + DMAR_IQH_REG);
1202 head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1203 head |= 1;
1204 tail = readl(iommu->reg + DMAR_IQT_REG);
1205 tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1207 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1209 do {
1210 if (qi->desc_status[head] == QI_IN_USE)
1211 qi->desc_status[head] = QI_ABORT;
1212 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1213 } while (head != tail);
1215 if (qi->desc_status[wait_index] == QI_ABORT)
1216 return -EAGAIN;
1219 if (fault & DMA_FSTS_ICE)
1220 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1222 return 0;
1226 * Submit the queued invalidation descriptor to the remapping
1227 * hardware unit and wait for its completion.
1229 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
1231 int rc;
1232 struct q_inval *qi = iommu->qi;
1233 int offset, shift, length;
1234 struct qi_desc wait_desc;
1235 int wait_index, index;
1236 unsigned long flags;
1238 if (!qi)
1239 return 0;
1241 restart:
1242 rc = 0;
1244 raw_spin_lock_irqsave(&qi->q_lock, flags);
1245 while (qi->free_cnt < 3) {
1246 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1247 cpu_relax();
1248 raw_spin_lock_irqsave(&qi->q_lock, flags);
1251 index = qi->free_head;
1252 wait_index = (index + 1) % QI_LENGTH;
1253 shift = qi_shift(iommu);
1254 length = 1 << shift;
1256 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
1258 offset = index << shift;
1259 memcpy(qi->desc + offset, desc, length);
1260 wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
1261 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1262 wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
1263 wait_desc.qw2 = 0;
1264 wait_desc.qw3 = 0;
1266 offset = wait_index << shift;
1267 memcpy(qi->desc + offset, &wait_desc, length);
1269 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
1270 qi->free_cnt -= 2;
1273 * update the HW tail register indicating the presence of
1274 * new descriptors.
1276 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
1278 while (qi->desc_status[wait_index] != QI_DONE) {
1280 * We leave the interrupts disabled to prevent interrupt
1281 * context from queueing another cmd while a cmd is already
1282 * submitted and waiting for completion on this cpu. This avoids
1283 * a deadlock where the interrupt context could wait indefinitely
1284 * for free slots in the queue.
1286 rc = qi_check_fault(iommu, index);
1287 if (rc)
1288 break;
1290 raw_spin_unlock(&qi->q_lock);
1291 cpu_relax();
1292 raw_spin_lock(&qi->q_lock);
1295 qi->desc_status[index] = QI_DONE;
1297 reclaim_free_desc(qi);
1298 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1300 if (rc == -EAGAIN)
1301 goto restart;
1303 return rc;
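/*
 * Ring layout sketch (illustrative): each submission consumes two
 * slots, the caller's descriptor followed by a wait descriptor whose
 * status word is polled for QI_DONE:
 *
 *	  index       wait_index
 *	    v             v
 *	[ desc ][ QI_IWD wait ] ...   free_head advances by 2
 *
 * qi_global_iec() below is the simplest real user.
 */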
1307 * Flush the global interrupt entry cache.
1309 void qi_global_iec(struct intel_iommu *iommu)
1311 struct qi_desc desc;
1313 desc.qw0 = QI_IEC_TYPE;
1314 desc.qw1 = 0;
1315 desc.qw2 = 0;
1316 desc.qw3 = 0;
1318 /* should never fail */
1319 qi_submit_sync(&desc, iommu);
1322 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1323 u64 type)
1325 struct qi_desc desc;
1327 desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1328 | QI_CC_GRAN(type) | QI_CC_TYPE;
1329 desc.qw1 = 0;
1330 desc.qw2 = 0;
1331 desc.qw3 = 0;
1333 qi_submit_sync(&desc, iommu);
1336 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1337 unsigned int size_order, u64 type)
1339 u8 dw = 0, dr = 0;
1341 struct qi_desc desc;
1342 int ih = 0;
1344 if (cap_write_drain(iommu->cap))
1345 dw = 1;
1347 if (cap_read_drain(iommu->cap))
1348 dr = 1;
1350 desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1351 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1352 desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1353 | QI_IOTLB_AM(size_order);
1354 desc.qw2 = 0;
1355 desc.qw3 = 0;
1357 qi_submit_sync(&desc, iommu);
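/*
 * Example call (sketch, hypothetical values): invalidate a 2MiB range
 * at IOVA 0x200000 for domain 5, i.e. size_order = 9 (2^9 4KiB pages):
 *
 *	qi_flush_iotlb(iommu, 5, 0x200000, 9, DMA_TLB_PSI_FLUSH);
 */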
1360 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1361 u16 qdep, u64 addr, unsigned mask)
1363 struct qi_desc desc;
1365 if (mask) {
1366 WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
1367 addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1368 desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1369 } else
1370 desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
1372 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1373 qdep = 0;
1375 desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1376 QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
1377 desc.qw2 = 0;
1378 desc.qw3 = 0;
1380 qi_submit_sync(&desc, iommu);
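/*
 * Size-encoding sketch (illustrative, hypothetical values): with
 * QI_DEV_IOTLB_SIZE set, the lowest clear address bit encodes the
 * range. For mask = 2 and addr = 0x10000, the OR above yields
 * 0x11fff; masked back to page granularity that is 0x11000, whose
 * lowest clear bit (bit 13) denotes 2^2 = 4 pages.
 */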
1384 * Disable Queued Invalidation interface.
1386 void dmar_disable_qi(struct intel_iommu *iommu)
1388 unsigned long flags;
1389 u32 sts;
1390 cycles_t start_time = get_cycles();
1392 if (!ecap_qis(iommu->ecap))
1393 return;
1395 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1397 sts = readl(iommu->reg + DMAR_GSTS_REG);
1398 if (!(sts & DMA_GSTS_QIES))
1399 goto end;
1402 * Give HW a chance to complete the pending invalidation requests.
1404 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1405 readl(iommu->reg + DMAR_IQH_REG)) &&
1406 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1407 cpu_relax();
1409 iommu->gcmd &= ~DMA_GCMD_QIE;
1410 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1412 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1413 !(sts & DMA_GSTS_QIES), sts);
1414 end:
1415 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1419 * Enable queued invalidation.
1421 static void __dmar_enable_qi(struct intel_iommu *iommu)
1423 u32 sts;
1424 unsigned long flags;
1425 struct q_inval *qi = iommu->qi;
1426 u64 val = virt_to_phys(qi->desc);
1428 qi->free_head = qi->free_tail = 0;
1429 qi->free_cnt = QI_LENGTH;
1432 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
1433 * is present.
1435 if (ecap_smts(iommu->ecap))
1436 val |= (1 << 11) | 1;
1438 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1440 /* write zero to the tail reg */
1441 writel(0, iommu->reg + DMAR_IQT_REG);
1443 dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
1445 iommu->gcmd |= DMA_GCMD_QIE;
1446 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1448 /* Make sure hardware completes it */
1449 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1451 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
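/*
 * IQA_REG sketch for the write above (per the VT-d spec): bits 63:12
 * carry the descriptor table base, bit 11 (DW) selects 256-bit
 * descriptors and bits 2:0 (QS) encode the queue size in pages, so
 * val | (1 << 11) | 1 requests scalable-mode descriptors in a
 * two-page queue.
 */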
1455 * Enable the Queued Invalidation interface. This is required to support
1456 * interrupt remapping. It is also used by DMA remapping, where it replaces
1457 * register-based IOTLB invalidation.
1459 int dmar_enable_qi(struct intel_iommu *iommu)
1461 struct q_inval *qi;
1462 struct page *desc_page;
1464 if (!ecap_qis(iommu->ecap))
1465 return -ENOENT;
1468 * queued invalidation is already set up and enabled.
1470 if (iommu->qi)
1471 return 0;
1473 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1474 if (!iommu->qi)
1475 return -ENOMEM;
1477 qi = iommu->qi;
1480 * Need two pages to accommodate 256 descriptors of 256 bits each
1481 * if the remapping hardware supports scalable mode translation.
1483 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
1484 !!ecap_smts(iommu->ecap));
1485 if (!desc_page) {
1486 kfree(qi);
1487 iommu->qi = NULL;
1488 return -ENOMEM;
1491 qi->desc = page_address(desc_page);
1493 qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
1494 if (!qi->desc_status) {
1495 free_page((unsigned long) qi->desc);
1496 kfree(qi);
1497 iommu->qi = NULL;
1498 return -ENOMEM;
1501 raw_spin_lock_init(&qi->q_lock);
1503 __dmar_enable_qi(iommu);
1505 return 0;
1508 /* iommu interrupt handling. Most of it is MSI-like. */
1510 enum faulttype {
1511 DMA_REMAP,
1512 INTR_REMAP,
1513 UNKNOWN,
1516 static const char *dma_remap_fault_reasons[] =
1518 "Software",
1519 "Present bit in root entry is clear",
1520 "Present bit in context entry is clear",
1521 "Invalid context entry",
1522 "Access beyond MGAW",
1523 "PTE Write access is not set",
1524 "PTE Read access is not set",
1525 "Next page table ptr is invalid",
1526 "Root table address invalid",
1527 "Context table ptr is invalid",
1528 "non-zero reserved fields in RTP",
1529 "non-zero reserved fields in CTP",
1530 "non-zero reserved fields in PTE",
1531 "PCE for translation request specifies blocking",
1534 static const char *irq_remap_fault_reasons[] =
1536 "Detected reserved fields in the decoded interrupt-remapped request",
1537 "Interrupt index exceeded the interrupt-remapping table size",
1538 "Present field in the IRTE entry is clear",
1539 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1540 "Detected reserved fields in the IRTE entry",
1541 "Blocked a compatibility format interrupt request",
1542 "Blocked an interrupt request due to source-id verification failure",
1545 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1547 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1548 ARRAY_SIZE(irq_remap_fault_reasons))) {
1549 *fault_type = INTR_REMAP;
1550 return irq_remap_fault_reasons[fault_reason - 0x20];
1551 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1552 *fault_type = DMA_REMAP;
1553 return dma_remap_fault_reasons[fault_reason];
1554 } else {
1555 *fault_type = UNKNOWN;
1556 return "Unknown";
1561 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1563 if (iommu->irq == irq)
1564 return DMAR_FECTL_REG;
1565 else if (iommu->pr_irq == irq)
1566 return DMAR_PECTL_REG;
1567 else
1568 BUG();
1571 void dmar_msi_unmask(struct irq_data *data)
1573 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1574 int reg = dmar_msi_reg(iommu, data->irq);
1575 unsigned long flag;
1577 /* unmask it */
1578 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1579 writel(0, iommu->reg + reg);
1580 /* Read a reg to force flush the post write */
1581 readl(iommu->reg + reg);
1582 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1585 void dmar_msi_mask(struct irq_data *data)
1587 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1588 int reg = dmar_msi_reg(iommu, data->irq);
1589 unsigned long flag;
1591 /* mask it */
1592 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1593 writel(DMA_FECTL_IM, iommu->reg + reg);
1594 /* Read a reg to force flush the post write */
1595 readl(iommu->reg + reg);
1596 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1599 void dmar_msi_write(int irq, struct msi_msg *msg)
1601 struct intel_iommu *iommu = irq_get_handler_data(irq);
1602 int reg = dmar_msi_reg(iommu, irq);
1603 unsigned long flag;
1605 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1606 writel(msg->data, iommu->reg + reg + 4);
1607 writel(msg->address_lo, iommu->reg + reg + 8);
1608 writel(msg->address_hi, iommu->reg + reg + 12);
1609 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1612 void dmar_msi_read(int irq, struct msi_msg *msg)
1614 struct intel_iommu *iommu = irq_get_handler_data(irq);
1615 int reg = dmar_msi_reg(iommu, irq);
1616 unsigned long flag;
1618 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1619 msg->data = readl(iommu->reg + reg + 4);
1620 msg->address_lo = readl(iommu->reg + reg + 8);
1621 msg->address_hi = readl(iommu->reg + reg + 12);
1622 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1625 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1626 u8 fault_reason, u16 source_id, unsigned long long addr)
1628 const char *reason;
1629 int fault_type;
1631 reason = dmar_get_fault_reason(fault_reason, &fault_type);
1633 if (fault_type == INTR_REMAP)
1634 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
1635 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1636 PCI_FUNC(source_id & 0xFF), addr >> 48,
1637 fault_reason, reason);
1638 else
1639 pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
1640 type ? "DMA Read" : "DMA Write",
1641 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1642 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1643 return 0;
1646 #define PRIMARY_FAULT_REG_LEN (16)
1647 irqreturn_t dmar_fault(int irq, void *dev_id)
1649 struct intel_iommu *iommu = dev_id;
1650 int reg, fault_index;
1651 u32 fault_status;
1652 unsigned long flag;
1653 static DEFINE_RATELIMIT_STATE(rs,
1654 DEFAULT_RATELIMIT_INTERVAL,
1655 DEFAULT_RATELIMIT_BURST);
1657 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1658 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1659 if (fault_status && __ratelimit(&rs))
1660 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1662 /* TBD: the advanced fault log is currently ignored */
1663 if (!(fault_status & DMA_FSTS_PPF))
1664 goto unlock_exit;
1666 fault_index = dma_fsts_fault_record_index(fault_status);
1667 reg = cap_fault_reg_offset(iommu->cap);
1668 while (1) {
1669 /* Disable printing, simply clear the fault when ratelimited */
1670 bool ratelimited = !__ratelimit(&rs);
1671 u8 fault_reason;
1672 u16 source_id;
1673 u64 guest_addr;
1674 int type;
1675 u32 data;
1677 /* highest 32 bits */
1678 data = readl(iommu->reg + reg +
1679 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1680 if (!(data & DMA_FRCD_F))
1681 break;
1683 if (!ratelimited) {
1684 fault_reason = dma_frcd_fault_reason(data);
1685 type = dma_frcd_type(data);
1687 data = readl(iommu->reg + reg +
1688 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1689 source_id = dma_frcd_source_id(data);
1691 guest_addr = dmar_readq(iommu->reg + reg +
1692 fault_index * PRIMARY_FAULT_REG_LEN);
1693 guest_addr = dma_frcd_page_addr(guest_addr);
1696 /* clear the fault */
1697 writel(DMA_FRCD_F, iommu->reg + reg +
1698 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1700 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1702 if (!ratelimited)
1703 dmar_fault_do_one(iommu, type, fault_reason,
1704 source_id, guest_addr);
1706 fault_index++;
1707 if (fault_index >= cap_num_fault_regs(iommu->cap))
1708 fault_index = 0;
1709 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1712 writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
1713 iommu->reg + DMAR_FSTS_REG);
1715 unlock_exit:
1716 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1717 return IRQ_HANDLED;
1720 int dmar_set_interrupt(struct intel_iommu *iommu)
1722 int irq, ret;
1725 * Check if the fault interrupt is already initialized.
1727 if (iommu->irq)
1728 return 0;
1730 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1731 if (irq > 0) {
1732 iommu->irq = irq;
1733 } else {
1734 pr_err("No free IRQ vectors\n");
1735 return -EINVAL;
1738 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1739 if (ret)
1740 pr_err("Can't request irq\n");
1741 return ret;
1744 int __init enable_drhd_fault_handling(void)
1746 struct dmar_drhd_unit *drhd;
1747 struct intel_iommu *iommu;
1750 * Enable fault control interrupt.
1752 for_each_iommu(iommu, drhd) {
1753 u32 fault_status;
1754 int ret = dmar_set_interrupt(iommu);
1756 if (ret) {
1757 pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
1758 (unsigned long long)drhd->reg_base_addr, ret);
1759 return -1;
1763 * Clear any previous faults.
1765 dmar_fault(iommu->irq, iommu);
1766 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1767 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1770 return 0;
1774 * Re-enable Queued Invalidation interface.
1776 int dmar_reenable_qi(struct intel_iommu *iommu)
1778 if (!ecap_qis(iommu->ecap))
1779 return -ENOENT;
1781 if (!iommu->qi)
1782 return -ENOENT;
1785 * First disable queued invalidation.
1787 dmar_disable_qi(iommu);
1789 * Then enable queued invalidation again. Since there are no pending
1790 * invalidation requests now, it's safe to re-enable queued
1791 * invalidation.
1793 __dmar_enable_qi(iommu);
1795 return 0;
1799 * Check interrupt remapping support in DMAR table description.
1801 int __init dmar_ir_support(void)
1803 struct acpi_table_dmar *dmar;
1804 dmar = (struct acpi_table_dmar *)dmar_tbl;
1805 if (!dmar)
1806 return 0;
1807 return dmar->flags & 0x1;
1810 /* Check whether DMAR units are in use */
1811 static inline bool dmar_in_use(void)
1813 return irq_remapping_enabled || intel_iommu_enabled;
1816 static int __init dmar_free_unused_resources(void)
1818 struct dmar_drhd_unit *dmaru, *dmaru_n;
1820 if (dmar_in_use())
1821 return 0;
1823 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1824 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
1826 down_write(&dmar_global_lock);
1827 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1828 list_del(&dmaru->list);
1829 dmar_free_drhd(dmaru);
1831 up_write(&dmar_global_lock);
1833 return 0;
1836 late_initcall(dmar_free_unused_resources);
1837 IOMMU_INIT_POST(detect_intel_iommu);
1840 * DMAR Hotplug Support
1841 * For more details, please refer to the Intel(R) Virtualization Technology
1842 * for Directed I/O Architecture Specification, Rev 2.2, Section 8.8
1843 * "Remapping Hardware Unit Hot Plug".
1845 static guid_t dmar_hp_guid =
1846 GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
1847 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
1850 * Currently there's only one revision and the BIOS will not check the revision id,
1851 * so use 0 for safety.
1853 #define DMAR_DSM_REV_ID 0
1854 #define DMAR_DSM_FUNC_DRHD 1
1855 #define DMAR_DSM_FUNC_ATSR 2
1856 #define DMAR_DSM_FUNC_RHSA 3
1858 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
1860 return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
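/*
 * Sketch: acpi_check_dsm() takes a bitmask of _DSM function indices
 * (bit 0 is the standard query function), so probing for DRHD support
 * passes 1 << DMAR_DSM_FUNC_DRHD == 0x2.
 */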
1863 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
1864 dmar_res_handler_t handler, void *arg)
1866 int ret = -ENODEV;
1867 union acpi_object *obj;
1868 struct acpi_dmar_header *start;
1869 struct dmar_res_callback callback;
1870 static int res_type[] = {
1871 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
1872 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
1873 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
1876 if (!dmar_detect_dsm(handle, func))
1877 return 0;
1879 obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
1880 func, NULL, ACPI_TYPE_BUFFER);
1881 if (!obj)
1882 return -ENODEV;
1884 memset(&callback, 0, sizeof(callback));
1885 callback.cb[res_type[func]] = handler;
1886 callback.arg[res_type[func]] = arg;
1887 start = (struct acpi_dmar_header *)obj->buffer.pointer;
1888 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
1890 ACPI_FREE(obj);
1892 return ret;
1895 static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
1897 int ret;
1898 struct dmar_drhd_unit *dmaru;
1900 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1901 if (!dmaru)
1902 return -ENODEV;
1904 ret = dmar_ir_hotplug(dmaru, true);
1905 if (ret == 0)
1906 ret = dmar_iommu_hotplug(dmaru, true);
1908 return ret;
1911 static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
1913 int i, ret;
1914 struct device *dev;
1915 struct dmar_drhd_unit *dmaru;
1917 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1918 if (!dmaru)
1919 return 0;
1922 * All PCI devices managed by this unit should have been destroyed.
1924 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
1925 for_each_active_dev_scope(dmaru->devices,
1926 dmaru->devices_cnt, i, dev)
1927 return -EBUSY;
1930 ret = dmar_ir_hotplug(dmaru, false);
1931 if (ret == 0)
1932 ret = dmar_iommu_hotplug(dmaru, false);
1934 return ret;
1937 static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
1939 struct dmar_drhd_unit *dmaru;
1941 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1942 if (dmaru) {
1943 list_del_rcu(&dmaru->list);
1944 synchronize_rcu();
1945 dmar_free_drhd(dmaru);
1948 return 0;
1951 static int dmar_hotplug_insert(acpi_handle handle)
1953 int ret;
1954 int drhd_count = 0;
1956 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1957 &dmar_validate_one_drhd, (void *)1);
1958 if (ret)
1959 goto out;
1961 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1962 &dmar_parse_one_drhd, (void *)&drhd_count);
1963 if (ret == 0 && drhd_count == 0) {
1964 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
1965 goto out;
1966 } else if (ret) {
1967 goto release_drhd;
1970 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
1971 &dmar_parse_one_rhsa, NULL);
1972 if (ret)
1973 goto release_drhd;
1975 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1976 &dmar_parse_one_atsr, NULL);
1977 if (ret)
1978 goto release_atsr;
1980 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1981 &dmar_hp_add_drhd, NULL);
1982 if (!ret)
1983 return 0;
1985 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1986 &dmar_hp_remove_drhd, NULL);
1987 release_atsr:
1988 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1989 &dmar_release_one_atsr, NULL);
1990 release_drhd:
1991 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1992 &dmar_hp_release_drhd, NULL);
1993 out:
1994 return ret;
1997 static int dmar_hotplug_remove(acpi_handle handle)
1999 int ret;
2001 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2002 &dmar_check_one_atsr, NULL);
2003 if (ret)
2004 return ret;
2006 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2007 &dmar_hp_remove_drhd, NULL);
2008 if (ret == 0) {
2009 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2010 &dmar_release_one_atsr, NULL));
2011 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2012 &dmar_hp_release_drhd, NULL));
2013 } else {
2014 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2015 &dmar_hp_add_drhd, NULL);
2018 return ret;
2021 static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
2022 void *context, void **retval)
2024 acpi_handle *phdl = retval;
2026 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2027 *phdl = handle;
2028 return AE_CTRL_TERMINATE;
2031 return AE_OK;
2034 static int dmar_device_hotplug(acpi_handle handle, bool insert)
2036 int ret;
2037 acpi_handle tmp = NULL;
2038 acpi_status status;
2040 if (!dmar_in_use())
2041 return 0;
2043 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2044 tmp = handle;
2045 } else {
2046 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2047 ACPI_UINT32_MAX,
2048 dmar_get_dsm_handle,
2049 NULL, NULL, &tmp);
2050 if (ACPI_FAILURE(status)) {
2051 pr_warn("Failed to locate _DSM method.\n");
2052 return -ENXIO;
2055 if (tmp == NULL)
2056 return 0;
2058 down_write(&dmar_global_lock);
2059 if (insert)
2060 ret = dmar_hotplug_insert(tmp);
2061 else
2062 ret = dmar_hotplug_remove(tmp);
2063 up_write(&dmar_global_lock);
2065 return ret;
2068 int dmar_device_add(acpi_handle handle)
2070 return dmar_device_hotplug(handle, true);
2073 int dmar_device_remove(acpi_handle handle)
2075 return dmar_device_hotplug(handle, false);
2079 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
2081 * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
2082 * the ACPI DMAR table. This means that the platform boot firmware has made
2083 * sure no device can issue DMA outside of RMRR regions.
2085 bool dmar_platform_optin(void)
2087 struct acpi_table_dmar *dmar;
2088 acpi_status status;
2089 bool ret;
2091 status = acpi_get_table(ACPI_SIG_DMAR, 0,
2092 (struct acpi_table_header **)&dmar);
2093 if (ACPI_FAILURE(status))
2094 return false;
2096 ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
2097 acpi_put_table((struct acpi_table_header *)dmar);
2099 return ret;
2101 EXPORT_SYMBOL_GPL(dmar_platform_optin);