/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};

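/*
 * Note: dmar_res_callback is a small dispatch table. dmar_walk_remapping_entries()
 * looks up cb[type]/arg[type] by the ACPI sub-table type of each record, so a
 * caller registers one handler per DMAR structure type it cares about and
 * leaves the rest NULL (unhandled types are ignored when ignore_unhandled
 * is set).
 */
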
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */

DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

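/*
 * Note: for BUS_NOTIFY_ADD_DEVICE the path[] array is filled from the root
 * bus down to the device itself (the second bus walk below decrements
 * 'level'), which matches the root-to-leaf ordering of the ACPI device
 * scope PCI paths that dmar_match_pci_path() compares against.
 */
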
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info "
				"for %s.\n", pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus              == info->path[i].bus &&
	    path[0].device   == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers.  However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header.  NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}

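/*
 * Note: the removal below follows the usual RCU teardown order: clear the
 * RCU-visible pointer first, wait for readers with synchronize_rcu(), and
 * only then drop the device reference, so lockless readers never see a
 * pointer to a device whose reference has already been put.
 */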
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

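/*
 * Note: notifier chains invoke callbacks in descending priority order, so
 * the INT_MIN priority below should make this notifier run after other PCI
 * bus notifiers have already seen the add/remove event.
 */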
static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};

static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	/*
	 * No matching unit was found; report the address from the RHSA
	 * itself (the loop cursor is no longer a valid unit here).
	 */
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

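/*
 * Note: this walker is the common path for both the boot-time DMAR table
 * (via dmar_walk_dmar_table()) and the buffers returned by the hotplug
 * _DSM methods (via dmar_walk_dsm_resource()), hence the defensive checks
 * against zero-length records and records running past the buffer end.
 */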
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	int ret = 0;
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end && ret == 0; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			ret = -EINVAL;
			break;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			ret = -EINVAL;
		}
	}

	return ret;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int ret = 0;
	int drhd_count = 0;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again; the earlier dmar_tbl mapping may have used a
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;
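
	/*
	 * Note: the DMAR table reports the host address width as width + 1,
	 * so anything below PAGE_SHIFT cannot even address a single page
	 * and is treated as firmware breakage.
	 */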
	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}

int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

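/*
 * Note: 'arg' selects the mapping primitive below: the boot-time detection
 * path leaves arg NULL and must use early_ioremap(), while the hotplug path
 * (see dmar_hotplug_insert(), which passes (void *)1) can use the regular
 * ioremap()/iounmap() pair.
 */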
static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}

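/*
 * Note: the return convention here is 1 when a DMAR table is present and
 * all of its DRHDs validate, -ENODEV otherwise. dmar_tbl is dropped again
 * after detection and re-acquired later by parse_dmar_table().
 */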
int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					    &validate_drhd_cb);
	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	acpi_put_table(dmar_tbl);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	if (intel_iommu_enabled) {
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);

		if (IS_ERR(iommu->iommu_dev)) {
			err = PTR_ERR(iommu->iommu_dev);
			goto err_unmap;
		}
	}

	drhd->iommu = iommu;

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	iommu_device_destroy(iommu->iommu_dev);

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

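/*
 * Note: every submission in qi_submit_sync() occupies two consecutive
 * slots, the caller's descriptor at 'index' and a wait descriptor at
 * index + 1, so completion of a submission is tracked through the status
 * of its wait descriptor slot.
 */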
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is to
		 * avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

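/*
 * Note: for mask != 0 the size of the invalidation region is encoded into
 * the low-order address bits below (the size-masked address format of the
 * device-IOTLB invalidation descriptor; see the VT-d specification for the
 * exact S-field encoding).
 */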
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure the hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
			source_id >> 8, PCI_SLOT(source_id & 0xFF),
			PCI_FUNC(source_id & 0xFF), addr >> 48,
			fault_reason, reason);
	else
		pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
		       type ? "DMA Read" : "DMA Write",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
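/*
 * Note: each primary fault recording register is a 16-byte record
 * (PRIMARY_FAULT_REG_LEN): the fault address lives in the low 64 bits,
 * the source-id in the dword at offset 8, and the fault reason, request
 * type and the F (fault) bit in the dword at offset 12, which is why the
 * loop below reads at those offsets.
 */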
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;
	bool ratelimited;
	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/* Disable printing, simply clear the fault when ratelimited */
	ratelimited = !__ratelimit(&rs);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status && !ratelimited)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		if (!ratelimited) {
			fault_reason = dma_frcd_fault_reason(data);
			type = dma_frcd_type(data);

			data = readl(iommu->reg + reg +
				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
			source_id = dma_frcd_source_id(data);

			guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
			guest_addr = dma_frcd_page_addr(guest_addr);
		}

		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		if (!ratelimited)
			dmar_fault_do_one(iommu, type, fault_reason,
					  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there is no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
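	/* Bit 0 of the DMAR table flags is the interrupt remapping support
	 * flag (INTR_REMAP in the VT-d specification). */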
	return dmar->flags & 0x1;
}

/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);

/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static u8 dmar_hp_uuid[] = {
	/* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
	/* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
};

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3

static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
}

static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}

static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}

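/*
 * Note: dmar_hotplug_insert() below stages the operation (validate the
 * DRHDs, parse DRHD/RHSA/ATSR, then activate the units) and unwinds
 * through the goto labels in reverse order on failure, so a partially
 * inserted unit is fully released again.
 */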
static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}

static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}

static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}

static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}

int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}