/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

extern const struct iommu_ops intel_iommu_ops;
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scan the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}
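
/*
 * Count the device scope entries of a DMAR structure below and allocate a
 * matching dmar_dev_scope array: NAMESPACE, ENDPOINT and BRIDGE entries
 * are counted, IOAPIC and HPET entries are skipped silently, and any
 * other type draws a warning.  Returns NULL when nothing is countable.
 */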
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}
/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}
static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}
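
/*
 * Match the PCI path recorded in the notify info against a device scope
 * path from the DMAR table.  If the direct comparison fails and the scope
 * path has a single entry, fall back to comparing against the last hop
 * only: some BIOSes emit broken RMRR entries of that form, and the
 * workaround below accepts them with an FW_BUG notice.
 */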
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus == info->path[i].bus &&
	    path[0].device == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers.  However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header.  NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}
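
/*
 * Drop a device from a device scope array.  The RCU-protected pointer is
 * cleared first and a grace period is observed via synchronize_rcu()
 * before the device reference is put, so lock-free readers never see a
 * dangling pointer.
 */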
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}
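
/*
 * ANDD structures declare ACPI namespace devices that are referenced by
 * NAMESPACE device scope entries.  The object name must be NUL-terminated
 * within the structure; a missing terminator is a firmware bug and the
 * entry is rejected below.
 */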
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}
#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
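
/*
 * Walk a buffer of DMAR remapping structures, dispatching each entry to
 * the callback registered for its type in the dmar_res_callback table.
 * Zero-length entries and entries running past the end of the buffer
 * indicate a broken table and stop the walk; unknown types are skipped
 * for forward compatibility.
 */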
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	int ret = 0;
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end && ret == 0; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			ret = -EINVAL;
			break;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			ret = -EINVAL;
		}
	}

	return ret;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int ret = 0;
	int drhd_count = 0;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
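
/*
 * Find the DRHD unit responsible for a PCI device: walk the unit list
 * under RCU and match the device (or one of its parent bridges) against
 * each unit's device scope, falling back to the INCLUDE_ALL unit for the
 * device's segment.  VFs are mapped to their PF first.
 */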
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}
static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}
int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}
static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}
int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					    &validate_drhd_cb);
	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	if (dmar_tbl) {
		acpi_put_table(dmar_tbl);
		dmar_tbl = NULL;
	}
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
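
/*
 * DMAR units get a small sequence number ("dmar0", "dmar1", ...) out of
 * the dmar_seq_ids bitmap.  The helpers below allocate and release those
 * ids; allocation leaves seq_id at -1 once DMAR_UNITS_SUPPORTED units are
 * already in use, which the caller treats as failure.
 */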
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	if (intel_iommu_enabled) {
		err = iommu_device_sysfs_add(&iommu->iommu, NULL,
					     intel_iommu_groups,
					     "%s", iommu->name);
		if (err)
			goto err_unmap;

		iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);

		err = iommu_device_register(&iommu->iommu);
		if (err)
			goto err_unmap;
	}

	drhd->iommu = iommu;

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}
static void free_iommu(struct intel_iommu *iommu)
{
	if (intel_iommu_enabled) {
		iommu_device_unregister(&iommu->iommu);
		iommu_device_sysfs_remove(&iommu->iommu);
	}

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
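
/*
 * Inspect the fault status register for errors affecting the invalidation
 * queue: an Invalidation Queue Error (IQE) caused by a bad descriptor, an
 * Invalidation Time-out Error (ITE) on a device-TLB invalidation, or an
 * Invalidation Completion Error (ICE).  An IQE on our own descriptor is
 * fixed up by overwriting it with the harmless wait descriptor; an ITE
 * aborts the pending wait descriptors so the caller retries via -EAGAIN.
 */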
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
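
/*
 * Each submission occupies two queue slots: the caller's descriptor plus
 * a wait descriptor whose status write-back is polled for QI_DONE.  A
 * typical caller just fills in a descriptor and submits it synchronously,
 * e.g. the global interrupt-entry-cache flush further below:
 *
 *	struct qi_desc desc;
 *
 *	desc.low = QI_IEC_TYPE;
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 */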
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd is
		 * already submitted and waiting for completion on this cpu.
		 * This is to avoid a deadlock where the interrupt context can
		 * wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
		       type ? "DMA Read" : "DMA Write",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
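
/*
 * Primary fault handler: while the Primary Pending Fault bit is set, walk
 * the fault recording registers (PRIMARY_FAULT_REG_LEN bytes apiece),
 * decode and log each fault, clear it, and finally acknowledge the
 * overflow and pending-fault status bits.  Logging is ratelimited; when
 * ratelimited the faults are still cleared, just not printed.
 */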
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;
	bool ratelimited;
	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/* Disable printing, simply clear the fault when ratelimited */
	ratelimited = !__ratelimit(&rs);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status && !ratelimited)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		if (!ratelimited) {
			fault_reason = dma_frcd_fault_reason(data);
			type = dma_frcd_type(data);

			data = readl(iommu->reg + reg +
				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
			source_id = dma_frcd_source_id(data);

			guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
			guest_addr = dma_frcd_page_addr(guest_addr);
		}

		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		if (!ratelimited)
			dmar_fault_do_one(iommu, type, fault_reason,
					  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static u8 dmar_hp_uuid[] = {
	/* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
	/* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
};

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3

static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
}
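
/*
 * Evaluate the DMAR _DSM function for a hotplugged unit and feed the
 * returned buffer of remapping structures through the same walker used
 * for the static ACPI table, with the handler bound to the resource type
 * that corresponds to the requested _DSM function.
 */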
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}
static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}
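
/*
 * Hot-add sequence: validate the new DRHD registers, parse the DRHD,
 * RHSA and ATSR structures, then bring the unit live for interrupt and
 * DMA remapping.  Each step unwinds the previous ones on failure, so a
 * failed hot-add leaves no partially registered unit behind.
 */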
static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}
static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}

static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}
static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}

int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}