Linux 4.19.133
drivers/iommu/dmar.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */
#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};
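
/*
 * A DMAR table walk dispatches each remapping structure to the handler
 * registered for its type: cb[entry->type] is invoked with
 * arg[entry->type].  See parse_dmar_table() below for a fully populated
 * example of this callback table.
 */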

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

extern const struct iommu_ops intel_iommu_ops;

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so a scan of the list
	 * will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/*
	 * Ignore devices that have a domain number higher than what can
	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
	 */
	if (pci_domain_nr(dev->bus) > U16_MAX)
		return NULL;

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(info->path[0]);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus              == info->path[i].bus &&
	    path[0].device   == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}
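
/*
 * The primary match above compares the full ACPI scope path (starting
 * bus plus each {device, function} hop) against the path recorded for
 * the notified PCI device.  The fallback handles firmware that lists a
 * single-hop scope relative to the device's own bus rather than its
 * parent bridge; such an entry is accepted with an FW_BUG warning.
 */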

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers.  However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header.  NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}
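
/*
 * Teardown order matters above: the RCU-protected pointer is cleared
 * first, then synchronize_rcu() waits for all readers that may still
 * hold the old pointer (e.g. under for_each_active_dev_scope()) to
 * finish before the device reference is dropped.
 */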

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};

static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		pr_warn(FW_BUG
			"Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	pr_warn(FW_BUG
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif

static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}

static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			return -EINVAL;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			int ret;

			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
			if (ret)
				return ret;
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			return -EINVAL;
		}
	}

	return 0;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}
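
/*
 * dmar_walk_dmar_table() strips the ACPI table header and walks only
 * the remapping structures that follow it; the same raw walker is
 * reused for the header-less buffers returned by the _DSM hotplug
 * method (see dmar_walk_dsm_resource() below).
 */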

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int drhd_count = 0;
	int ret;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;
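
	/*
	 * The DMAR table encodes the host address width as HAW - 1,
	 * hence the "+ 1" when printing below; anything narrower than a
	 * page (width < PAGE_SHIFT - 1) cannot be a valid HAW.
	 */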
	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}

int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}
	}

	return dmar_dev_scope_status;
}

void __init dmar_register_bus_notifier(void)
{
	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
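
/*
 * dmar_table_init() is reached from both the interrupt-remapping and
 * DMA-remapping initialization paths; the static flag caches the first
 * outcome so the table is parsed only once, and later callers simply
 * see 0 or the saved error.
 */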

static void warn_invalid_dmar(u64 addr, const char *message)
{
	pr_warn_once(FW_BUG
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}

static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (!ret)
		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					   &validate_drhd_cb);
	if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (!ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	if (dmar_tbl) {
		acpi_put_table(dmar_tbl);
		dmar_tbl = NULL;
	}
	up_write(&dmar_global_lock);

	return ret ? ret : 1;
}
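
/*
 * Detection only validates that each DRHD's registers respond; the
 * table reference is dropped again (acpi_put_table()) and re-acquired
 * later by parse_dmar_table(), so no mapping is kept across early
 * boot.  The non-zero return on success feeds the IOMMU_INIT_POST()
 * detection hook registered at the bottom of this file.
 */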

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
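
/*
 * The two-stage mapping above is unavoidable: the size of the register
 * set (fault recording registers, IOTLB registers) is itself described
 * by the capability registers, so one page must be mapped first just
 * to read cap/ecap before the full extent can be computed and
 * remapped.
 */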

static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	if (intel_iommu_enabled) {
		err = iommu_device_sysfs_add(&iommu->iommu, NULL,
					     intel_iommu_groups,
					     "%s", iommu->name);
		if (err)
			goto err_unmap;

		iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);

		err = iommu_device_register(&iommu->iommu);
		if (err)
			goto err_unmap;
	}

	drhd->iommu = iommu;

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	if (intel_iommu_enabled) {
		iommu_device_unregister(&iommu->iommu);
		iommu_device_sysfs_remove(&iommu->iommu);
	}

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
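
/*
 * The "head |= 1" above relies on the queue layout used by
 * qi_submit_sync(): descriptors are always posted in pairs, the
 * caller's descriptor at an even index and its wait descriptor at the
 * following odd index, so rounding up to an odd index always lands on
 * a wait descriptor.
 */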

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
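
/*
 * Completion is detected without an interrupt: the wait descriptor
 * instructs the hardware to write QI_DONE into the status slot whose
 * physical address it carries (wait_desc.high above), and the
 * submitter spins on that slot.  A minimal caller just builds one
 * descriptor and submits it, e.g. qi_global_iec() below.
 */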

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
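
/*
 * All three allocations above use GFP_ATOMIC since this path may be
 * reached in atomic context.  The descriptor ring is a single zeroed
 * page holding QI_LENGTH entries, allocated on the IOMMU's local NUMA
 * node.
 */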

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
		       type ? "DMA Read" : "DMA Write",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;
	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status && __ratelimit(&rs))
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		/* Disable printing, simply clear the fault when ratelimited */
		bool ratelimited = !__ratelimit(&rs);
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		if (!ratelimited) {
			fault_reason = dma_frcd_fault_reason(data);
			type = dma_frcd_type(data);

			data = readl(iommu->reg + reg +
				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
			source_id = dma_frcd_source_id(data);

			guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
			guest_addr = dma_frcd_page_addr(guest_addr);
		}

		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		if (!ratelimited)
			dmar_fault_do_one(iommu, type, fault_reason,
					  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
	       iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
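
/*
 * Each primary fault record is PRIMARY_FAULT_REG_LEN (16) bytes wide:
 * the faulting address lives in the low quadword, the source-id in the
 * dword at offset 8, and the dword at offset 12 carries the fault
 * reason, request type and the F (fault) bit that is written back to
 * clear the record.
 */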

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);

/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static guid_t dmar_hp_guid =
	GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
		  0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3

static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
}
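
/*
 * acpi_check_dsm() tests the support bitmask the firmware returns for
 * function 0 of the _DSM, so "1 << func" probes whether the DRHD, ATSR
 * or RHSA query in question is implemented before it is evaluated.
 */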

static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}

static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}

static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}

static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}

static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}

static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}

int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}