drivers/iommu/dmar.c
1 /*
2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
22 * This file implements early detection/parsing of Remapping Devices
23 * reported to the OS through the BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables.
26 * These routines are used by both DMA-remapping and Interrupt-remapping
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/iova.h>
34 #include <linux/intel-iommu.h>
35 #include <linux/timer.h>
36 #include <linux/irq.h>
37 #include <linux/interrupt.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/slab.h>
41 #include <asm/irq_remapping.h>
42 #include <asm/iommu_table.h>
44 #include "irq_remapping.h"
46 /* No locks are needed as DMA remapping hardware unit
47 * list is constructed at boot time and hotplug of
48 * these units is not supported by the architecture.
50 LIST_HEAD(dmar_drhd_units);
52 struct acpi_table_header * __initdata dmar_tbl;
53 static acpi_size dmar_tbl_size;
55 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
58 * add INCLUDE_ALL at the tail, so a scan of the list will find it
59 * at the very end.
61 if (drhd->include_all)
62 list_add_tail(&drhd->list, &dmar_drhd_units);
63 else
64 list_add(&drhd->list, &dmar_drhd_units);
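/*
 * dmar_parse_one_dev_scope - resolve one ACPI device scope entry to a
 * struct pci_dev by walking its bus/dev/fn path under the given segment.
 * On success *dev holds a referenced pci_dev, or NULL if the BIOS listed
 * a device that is not actually present; a scope type mismatch returns
 * -EINVAL.
 */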
67 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
68 struct pci_dev **dev, u16 segment)
70 struct pci_bus *bus;
71 struct pci_dev *pdev = NULL;
72 struct acpi_dmar_pci_path *path;
73 int count;
75 bus = pci_find_bus(segment, scope->bus);
76 path = (struct acpi_dmar_pci_path *)(scope + 1);
77 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
78 / sizeof(struct acpi_dmar_pci_path);
80 while (count) {
81 if (pdev)
82 pci_dev_put(pdev);
84 * Some BIOSes list non-existent devices in the DMAR table;
85 * just ignore them.
87 if (!bus) {
88 pr_warn("Device scope bus [%d] not found\n", scope->bus);
89 break;
91 pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
92 if (!pdev) {
93 /* warning will be printed below */
94 break;
96 path++;
97 count--;
98 bus = pdev->subordinate;
100 if (!pdev) {
101 pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
102 segment, scope->bus, path->dev, path->fn);
103 *dev = NULL;
104 return 0;
106 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
107 pdev->subordinate) || (scope->entry_type == \
108 ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
109 pr_warn("Device scope type does not match for %s\n",
110 pci_name(pdev));
111 pci_dev_put(pdev);
112 return -EINVAL;
114 *dev = pdev;
115 return 0;
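/*
 * dmar_parse_dev_scope - parse all device scope entries in [start, end):
 * a first pass counts the endpoint/bridge entries, then an array of
 * pci_dev pointers is allocated and each entry is resolved through
 * dmar_parse_one_dev_scope().
 */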
118 int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
119 struct pci_dev ***devices, u16 segment)
121 struct acpi_dmar_device_scope *scope;
122 void *tmp = start;
123 int index;
124 int ret;
126 *cnt = 0;
127 while (start < end) {
128 scope = start;
129 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
130 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
131 (*cnt)++;
132 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
133 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
134 pr_warn("Unsupported device scope\n");
136 start += scope->length;
138 if (*cnt == 0)
139 return 0;
141 *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
142 if (!*devices)
143 return -ENOMEM;
145 start = tmp;
146 index = 0;
147 while (start < end) {
148 scope = start;
149 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
150 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
151 ret = dmar_parse_one_dev_scope(scope,
152 &(*devices)[index], segment);
153 if (ret) {
154 kfree(*devices);
155 return ret;
157 index++;
159 start += scope->length;
162 return 0;
166 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
167 * structure which uniquely represents one DMA remapping hardware unit
168 * present in the platform
170 static int __init
171 dmar_parse_one_drhd(struct acpi_dmar_header *header)
173 struct acpi_dmar_hardware_unit *drhd;
174 struct dmar_drhd_unit *dmaru;
175 int ret = 0;
177 drhd = (struct acpi_dmar_hardware_unit *)header;
178 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
179 if (!dmaru)
180 return -ENOMEM;
182 dmaru->hdr = header;
183 dmaru->reg_base_addr = drhd->address;
184 dmaru->segment = drhd->segment;
185 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
187 ret = alloc_iommu(dmaru);
188 if (ret) {
189 kfree(dmaru);
190 return ret;
192 dmar_register_drhd_unit(dmaru);
193 return 0;
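/*
 * dmar_parse_dev - resolve the device scope of one DRHD unit. INCLUDE_ALL
 * units carry no explicit scope; on failure the unit is unlinked from
 * dmar_drhd_units and freed.
 */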
196 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
198 struct acpi_dmar_hardware_unit *drhd;
199 int ret = 0;
201 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
203 if (dmaru->include_all)
204 return 0;
206 ret = dmar_parse_dev_scope((void *)(drhd + 1),
207 ((void *)drhd) + drhd->header.length,
208 &dmaru->devices_cnt, &dmaru->devices,
209 drhd->segment);
210 if (ret) {
211 list_del(&dmaru->list);
212 kfree(dmaru);
214 return ret;
217 #ifdef CONFIG_ACPI_NUMA
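/*
 * dmar_parse_one_rhsa - bind the NUMA node described by an RHSA entry to
 * the DRHD unit with the matching register base address; a dangling RHSA
 * entry only triggers a firmware warning.
 */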
218 static int __init
219 dmar_parse_one_rhsa(struct acpi_dmar_header *header)
221 struct acpi_dmar_rhsa *rhsa;
222 struct dmar_drhd_unit *drhd;
224 rhsa = (struct acpi_dmar_rhsa *)header;
225 for_each_drhd_unit(drhd) {
226 if (drhd->reg_base_addr == rhsa->base_address) {
227 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
229 if (!node_online(node))
230 node = -1;
231 drhd->iommu->node = node;
232 return 0;
235 WARN_TAINT(
236 1, TAINT_FIRMWARE_WORKAROUND,
237 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
238 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
239 drhd->reg_base_addr,
240 dmi_get_system_info(DMI_BIOS_VENDOR),
241 dmi_get_system_info(DMI_BIOS_VERSION),
242 dmi_get_system_info(DMI_PRODUCT_VERSION));
244 return 0;
246 #endif
248 static void __init
249 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
251 struct acpi_dmar_hardware_unit *drhd;
252 struct acpi_dmar_reserved_memory *rmrr;
253 struct acpi_dmar_atsr *atsr;
254 struct acpi_dmar_rhsa *rhsa;
256 switch (header->type) {
257 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
258 drhd = container_of(header, struct acpi_dmar_hardware_unit,
259 header);
260 pr_info("DRHD base: %#016Lx flags: %#x\n",
261 (unsigned long long)drhd->address, drhd->flags);
262 break;
263 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
264 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
265 header);
266 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
267 (unsigned long long)rmrr->base_address,
268 (unsigned long long)rmrr->end_address);
269 break;
270 case ACPI_DMAR_TYPE_ATSR:
271 atsr = container_of(header, struct acpi_dmar_atsr, header);
272 pr_info("ATSR flags: %#x\n", atsr->flags);
273 break;
274 case ACPI_DMAR_HARDWARE_AFFINITY:
275 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
276 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
277 (unsigned long long)rhsa->base_address,
278 rhsa->proximity_domain);
279 break;
284 * dmar_table_detect - checks to see if the platform supports DMAR devices
286 static int __init dmar_table_detect(void)
288 acpi_status status = AE_OK;
290 /* if we can find the DMAR table, then there are DMAR devices */
291 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
292 (struct acpi_table_header **)&dmar_tbl,
293 &dmar_tbl_size);
295 if (ACPI_SUCCESS(status) && !dmar_tbl) {
296 pr_warn("Unable to map DMAR\n");
297 status = AE_NOT_FOUND;
300 return (ACPI_SUCCESS(status) ? 1 : 0);
304 * parse_dmar_table - parses the DMA reporting table
306 static int __init
307 parse_dmar_table(void)
309 struct acpi_table_dmar *dmar;
310 struct acpi_dmar_header *entry_header;
311 int ret = 0;
314 * Do it again; the earlier dmar_tbl mapping could have been done
315 * with a fixed map.
317 dmar_table_detect();
320 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
321 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected).
323 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
325 dmar = (struct acpi_table_dmar *)dmar_tbl;
326 if (!dmar)
327 return -ENODEV;
329 if (dmar->width < PAGE_SHIFT - 1) {
330 pr_warn("Invalid DMAR haw\n");
331 return -EINVAL;
334 pr_info("Host address width %d\n", dmar->width + 1);
336 entry_header = (struct acpi_dmar_header *)(dmar + 1);
337 while (((unsigned long)entry_header) <
338 (((unsigned long)dmar) + dmar_tbl->length)) {
339 /* Avoid looping forever on bad ACPI tables */
340 if (entry_header->length == 0) {
341 pr_warn("Invalid 0-length structure\n");
342 ret = -EINVAL;
343 break;
346 dmar_table_print_dmar_entry(entry_header);
348 switch (entry_header->type) {
349 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
350 ret = dmar_parse_one_drhd(entry_header);
351 break;
352 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
353 ret = dmar_parse_one_rmrr(entry_header);
354 break;
355 case ACPI_DMAR_TYPE_ATSR:
356 ret = dmar_parse_one_atsr(entry_header);
357 break;
358 case ACPI_DMAR_HARDWARE_AFFINITY:
359 #ifdef CONFIG_ACPI_NUMA
360 ret = dmar_parse_one_rhsa(entry_header);
361 #endif
362 break;
363 default:
364 pr_warn("Unknown DMAR structure type %d\n",
365 entry_header->type);
366 ret = 0; /* for forward compatibility */
367 break;
369 if (ret)
370 break;
372 entry_header = ((void *)entry_header + entry_header->length);
374 return ret;
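/*
 * dmar_pci_device_match - return 1 if @dev, or any bridge above it, is
 * listed in the @devices array of a DRHD scope.
 */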
377 static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
378 struct pci_dev *dev)
380 int index;
382 while (dev) {
383 for (index = 0; index < cnt; index++)
384 if (dev == devices[index])
385 return 1;
387 /* Check our parent */
388 dev = dev->bus->self;
391 return 0;
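/*
 * dmar_find_matched_drhd_unit - find the DRHD unit whose scope covers
 * @dev. Virtual functions are matched through their physical function.
 * Explicitly scoped units are checked first; the INCLUDE_ALL unit of the
 * segment (kept at the list tail) serves as the fallback.
 */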
394 struct dmar_drhd_unit *
395 dmar_find_matched_drhd_unit(struct pci_dev *dev)
397 struct dmar_drhd_unit *dmaru = NULL;
398 struct acpi_dmar_hardware_unit *drhd;
400 dev = pci_physfn(dev);
402 list_for_each_entry(dmaru, &dmar_drhd_units, list) {
403 drhd = container_of(dmaru->hdr,
404 struct acpi_dmar_hardware_unit,
405 header);
407 if (dmaru->include_all &&
408 drhd->segment == pci_domain_nr(dev->bus))
409 return dmaru;
411 if (dmar_pci_device_match(dmaru->devices,
412 dmaru->devices_cnt, dev))
413 return dmaru;
416 return NULL;
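/*
 * dmar_dev_scope_init - resolve the device scopes of all DRHD units and
 * of the RMRR/ATSR entries. Runs only once; the outcome (0 or an error
 * code) is cached in a static and returned on subsequent calls.
 */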
419 int __init dmar_dev_scope_init(void)
421 static int dmar_dev_scope_initialized;
422 struct dmar_drhd_unit *drhd, *drhd_n;
423 int ret = -ENODEV;
425 if (dmar_dev_scope_initialized)
426 return dmar_dev_scope_initialized;
428 if (list_empty(&dmar_drhd_units))
429 goto fail;
431 list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
432 ret = dmar_parse_dev(drhd);
433 if (ret)
434 goto fail;
437 ret = dmar_parse_rmrr_atsr_dev();
438 if (ret)
439 goto fail;
441 dmar_dev_scope_initialized = 1;
442 return 0;
444 fail:
445 dmar_dev_scope_initialized = ret;
446 return ret;
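/*
 * dmar_table_init - parse the DMAR table exactly once and report whether
 * any DMA remapping hardware units were found.
 */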
450 int __init dmar_table_init(void)
452 static int dmar_table_initialized;
453 int ret;
455 if (dmar_table_initialized)
456 return 0;
458 dmar_table_initialized = 1;
460 ret = parse_dmar_table();
461 if (ret) {
462 if (ret != -ENODEV)
463 pr_info("parse DMAR table failure.\n");
464 return ret;
467 if (list_empty(&dmar_drhd_units)) {
468 pr_info("No DMAR devices found\n");
469 return -ENODEV;
472 return 0;
475 static void warn_invalid_dmar(u64 addr, const char *message)
477 WARN_TAINT_ONCE(
478 1, TAINT_FIRMWARE_WORKAROUND,
479 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
480 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
481 addr, message,
482 dmi_get_system_info(DMI_BIOS_VENDOR),
483 dmi_get_system_info(DMI_BIOS_VERSION),
484 dmi_get_system_info(DMI_PRODUCT_VERSION));
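/*
 * check_zero_address - sanity check every DRHD entry in the DMAR table:
 * the register base address must be non-zero and must not read back as
 * all ones. Returns 1 if the table looks usable, 0 otherwise.
 */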
487 int __init check_zero_address(void)
489 struct acpi_table_dmar *dmar;
490 struct acpi_dmar_header *entry_header;
491 struct acpi_dmar_hardware_unit *drhd;
493 dmar = (struct acpi_table_dmar *)dmar_tbl;
494 entry_header = (struct acpi_dmar_header *)(dmar + 1);
496 while (((unsigned long)entry_header) <
497 (((unsigned long)dmar) + dmar_tbl->length)) {
498 /* Avoid looping forever on bad ACPI tables */
499 if (entry_header->length == 0) {
500 pr_warn("Invalid 0-length structure\n");
501 return 0;
504 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
505 void __iomem *addr;
506 u64 cap, ecap;
508 drhd = (void *)entry_header;
509 if (!drhd->address) {
510 warn_invalid_dmar(0, "");
511 goto failed;
514 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
515 if (!addr) {
516 pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
517 goto failed;
519 cap = dmar_readq(addr + DMAR_CAP_REG);
520 ecap = dmar_readq(addr + DMAR_ECAP_REG);
521 early_iounmap(addr, VTD_PAGE_SIZE);
522 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
523 warn_invalid_dmar(drhd->address,
524 " returns all ones");
525 goto failed;
529 entry_header = ((void *)entry_header + entry_header->length);
531 return 1;
533 failed:
534 return 0;
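/*
 * detect_intel_iommu - early detection entry point. Looks for a usable
 * DMAR table, marks the IOMMU as detected and, on x86, hooks
 * intel_iommu_init into the boot-time init path, then drops the early
 * dmar_tbl mapping. Returns 1 if VT-d hardware was found, -ENODEV
 * otherwise.
 */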
537 int __init detect_intel_iommu(void)
539 int ret;
541 ret = dmar_table_detect();
542 if (ret)
543 ret = check_zero_address();
545 struct acpi_table_dmar *dmar;
547 dmar = (struct acpi_table_dmar *) dmar_tbl;
549 if (ret && irq_remapping_enabled && cpu_has_x2apic &&
550 dmar->flags & 0x1)
551 pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
553 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
554 iommu_detected = 1;
555 /* Make sure ACS will be enabled */
556 pci_request_acs();
559 #ifdef CONFIG_X86
560 if (ret)
561 x86_init.iommu.iommu_init = intel_iommu_init;
562 #endif
564 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
565 dmar_tbl = NULL;
567 return ret ? 1 : -ENODEV;
571 static void unmap_iommu(struct intel_iommu *iommu)
573 iounmap(iommu->reg);
574 release_mem_region(iommu->reg_phys, iommu->reg_size);
578 * map_iommu: map the iommu's registers
579 * @iommu: the iommu to map
580 * @phys_addr: the physical address of the base register
582 * Memory map the iommu's registers. Start with a single page, and
583 * possibly expand if that turns out to be insufficient.
585 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
587 int map_size, err=0;
589 iommu->reg_phys = phys_addr;
590 iommu->reg_size = VTD_PAGE_SIZE;
592 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
593 pr_err("IOMMU: can't reserve memory\n");
594 err = -EBUSY;
595 goto out;
598 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
599 if (!iommu->reg) {
600 pr_err("IOMMU: can't map the region\n");
601 err = -ENOMEM;
602 goto release;
605 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
606 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
608 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
609 err = -EINVAL;
610 warn_invalid_dmar(phys_addr, " returns all ones");
611 goto unmap;
614 /* the registers might be more than one page */
615 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
616 cap_max_fault_reg_offset(iommu->cap));
617 map_size = VTD_PAGE_ALIGN(map_size);
618 if (map_size > iommu->reg_size) {
619 iounmap(iommu->reg);
620 release_mem_region(iommu->reg_phys, iommu->reg_size);
621 iommu->reg_size = map_size;
622 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
623 iommu->name)) {
624 pr_err("IOMMU: can't reserve memory\n");
625 err = -EBUSY;
626 goto out;
628 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
629 if (!iommu->reg) {
630 pr_err("IOMMU: can't map the region\n");
631 err = -ENOMEM;
632 goto release;
635 err = 0;
636 goto out;
638 unmap:
639 iounmap(iommu->reg);
640 release:
641 release_mem_region(iommu->reg_phys, iommu->reg_size);
642 out:
643 return err;
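/*
 * alloc_iommu - allocate and initialize a struct intel_iommu for one DRHD
 * unit: map its registers, read the capability and extended capability
 * registers, compute the supported adjusted guest address widths and
 * mirror the enable bits already set in the global status register into
 * gcmd.
 */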
646 int alloc_iommu(struct dmar_drhd_unit *drhd)
648 struct intel_iommu *iommu;
649 u32 ver, sts;
650 static int iommu_allocated = 0;
651 int agaw = 0;
652 int msagaw = 0;
653 int err;
655 if (!drhd->reg_base_addr) {
656 warn_invalid_dmar(0, "");
657 return -EINVAL;
660 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
661 if (!iommu)
662 return -ENOMEM;
664 iommu->seq_id = iommu_allocated++;
665 sprintf(iommu->name, "dmar%d", iommu->seq_id);
667 err = map_iommu(iommu, drhd->reg_base_addr);
668 if (err) {
669 pr_err("IOMMU: failed to map %s\n", iommu->name);
670 goto error;
673 err = -EINVAL;
674 agaw = iommu_calculate_agaw(iommu);
675 if (agaw < 0) {
676 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
677 iommu->seq_id);
678 goto err_unmap;
680 msagaw = iommu_calculate_max_sagaw(iommu);
681 if (msagaw < 0) {
682 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
683 iommu->seq_id);
684 goto err_unmap;
686 iommu->agaw = agaw;
687 iommu->msagaw = msagaw;
689 iommu->node = -1;
691 ver = readl(iommu->reg + DMAR_VER_REG);
692 pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
693 iommu->seq_id,
694 (unsigned long long)drhd->reg_base_addr,
695 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
696 (unsigned long long)iommu->cap,
697 (unsigned long long)iommu->ecap);
699 /* Reflect status in gcmd */
700 sts = readl(iommu->reg + DMAR_GSTS_REG);
701 if (sts & DMA_GSTS_IRES)
702 iommu->gcmd |= DMA_GCMD_IRE;
703 if (sts & DMA_GSTS_TES)
704 iommu->gcmd |= DMA_GCMD_TE;
705 if (sts & DMA_GSTS_QIES)
706 iommu->gcmd |= DMA_GCMD_QIE;
708 raw_spin_lock_init(&iommu->register_lock);
710 drhd->iommu = iommu;
711 return 0;
713 err_unmap:
714 unmap_iommu(iommu);
715 error:
716 kfree(iommu);
717 return err;
720 void free_iommu(struct intel_iommu *iommu)
722 if (!iommu)
723 return;
725 free_dmar_iommu(iommu);
727 if (iommu->reg)
728 unmap_iommu(iommu);
730 kfree(iommu);
734 * Reclaim all the submitted descriptors which have completed their work.
736 static inline void reclaim_free_desc(struct q_inval *qi)
738 while (qi->desc_status[qi->free_tail] == QI_DONE ||
739 qi->desc_status[qi->free_tail] == QI_ABORT) {
740 qi->desc_status[qi->free_tail] = QI_FREE;
741 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
742 qi->free_cnt++;
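/*
 * qi_check_fault - check the fault status register for queued
 * invalidation errors. An IQE on our descriptor is reported and yields
 * -EINVAL, an ITE aborts the in-flight descriptors and yields -EAGAIN if
 * our wait descriptor was among them, and an ICE is simply cleared.
 */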
746 static int qi_check_fault(struct intel_iommu *iommu, int index)
748 u32 fault;
749 int head, tail;
750 struct q_inval *qi = iommu->qi;
751 int wait_index = (index + 1) % QI_LENGTH;
753 if (qi->desc_status[wait_index] == QI_ABORT)
754 return -EAGAIN;
756 fault = readl(iommu->reg + DMAR_FSTS_REG);
759 * If IQE happens, the head points to the descriptor associated
760 * with the error. No new descriptors are fetched until the IQE
761 * is cleared.
763 if (fault & DMA_FSTS_IQE) {
764 head = readl(iommu->reg + DMAR_IQH_REG);
765 if ((head >> DMAR_IQ_SHIFT) == index) {
766 pr_err("VT-d detected invalid descriptor: "
767 "low=%llx, high=%llx\n",
768 (unsigned long long)qi->desc[index].low,
769 (unsigned long long)qi->desc[index].high);
770 memcpy(&qi->desc[index], &qi->desc[wait_index],
771 sizeof(struct qi_desc));
772 __iommu_flush_cache(iommu, &qi->desc[index],
773 sizeof(struct qi_desc));
774 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
775 return -EINVAL;
780 * If ITE happens, all pending wait_desc commands are aborted.
781 * No new descriptors are fetched until the ITE is cleared.
783 if (fault & DMA_FSTS_ITE) {
784 head = readl(iommu->reg + DMAR_IQH_REG);
785 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
786 head |= 1;
787 tail = readl(iommu->reg + DMAR_IQT_REG);
788 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
790 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
792 do {
793 if (qi->desc_status[head] == QI_IN_USE)
794 qi->desc_status[head] = QI_ABORT;
795 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
796 } while (head != tail);
798 if (qi->desc_status[wait_index] == QI_ABORT)
799 return -EAGAIN;
802 if (fault & DMA_FSTS_ICE)
803 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
805 return 0;
809 * Submit the queued invalidation descriptor to the remapping
810 * hardware unit and wait for its completion.
812 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
814 int rc;
815 struct q_inval *qi = iommu->qi;
816 struct qi_desc *hw, wait_desc;
817 int wait_index, index;
818 unsigned long flags;
820 if (!qi)
821 return 0;
823 hw = qi->desc;
825 restart:
826 rc = 0;
828 raw_spin_lock_irqsave(&qi->q_lock, flags);
829 while (qi->free_cnt < 3) {
830 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
831 cpu_relax();
832 raw_spin_lock_irqsave(&qi->q_lock, flags);
835 index = qi->free_head;
836 wait_index = (index + 1) % QI_LENGTH;
838 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
840 hw[index] = *desc;
842 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
843 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
844 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
846 hw[wait_index] = wait_desc;
848 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
849 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
851 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
852 qi->free_cnt -= 2;
855 * update the HW tail register indicating the presence of
856 * new descriptors.
858 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
860 while (qi->desc_status[wait_index] != QI_DONE) {
862 * We leave the interrupts disabled to prevent the interrupt
863 * context from queueing another cmd while a cmd is already
864 * submitted and waiting for completion on this cpu. This avoids
865 * a deadlock where the interrupt context could wait indefinitely
866 * for free slots in the queue.
868 rc = qi_check_fault(iommu, index);
869 if (rc)
870 break;
872 raw_spin_unlock(&qi->q_lock);
873 cpu_relax();
874 raw_spin_lock(&qi->q_lock);
877 qi->desc_status[index] = QI_DONE;
879 reclaim_free_desc(qi);
880 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
882 if (rc == -EAGAIN)
883 goto restart;
885 return rc;
889 * Flush the global interrupt entry cache.
891 void qi_global_iec(struct intel_iommu *iommu)
893 struct qi_desc desc;
895 desc.low = QI_IEC_TYPE;
896 desc.high = 0;
898 /* should never fail */
899 qi_submit_sync(&desc, iommu);
902 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
903 u64 type)
905 struct qi_desc desc;
907 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
908 | QI_CC_GRAN(type) | QI_CC_TYPE;
909 desc.high = 0;
911 qi_submit_sync(&desc, iommu);
914 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
915 unsigned int size_order, u64 type)
917 u8 dw = 0, dr = 0;
919 struct qi_desc desc;
920 int ih = 0;
922 if (cap_write_drain(iommu->cap))
923 dw = 1;
925 if (cap_read_drain(iommu->cap))
926 dr = 1;
928 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
929 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
930 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
931 | QI_IOTLB_AM(size_order);
933 qi_submit_sync(&desc, iommu);
936 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
937 u64 addr, unsigned mask)
939 struct qi_desc desc;
941 if (mask) {
942 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
943 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
944 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
945 } else
946 desc.high = QI_DEV_IOTLB_ADDR(addr);
948 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
949 qdep = 0;
951 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
952 QI_DIOTLB_TYPE;
954 qi_submit_sync(&desc, iommu);
958 * Disable Queued Invalidation interface.
960 void dmar_disable_qi(struct intel_iommu *iommu)
962 unsigned long flags;
963 u32 sts;
964 cycles_t start_time = get_cycles();
966 if (!ecap_qis(iommu->ecap))
967 return;
969 raw_spin_lock_irqsave(&iommu->register_lock, flags);
971 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
972 if (!(sts & DMA_GSTS_QIES))
973 goto end;
976 * Give the HW a chance to complete the pending invalidation requests.
978 while ((readl(iommu->reg + DMAR_IQT_REG) !=
979 readl(iommu->reg + DMAR_IQH_REG)) &&
980 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
981 cpu_relax();
983 iommu->gcmd &= ~DMA_GCMD_QIE;
984 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
986 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
987 !(sts & DMA_GSTS_QIES), sts);
988 end:
989 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
993 * Enable queued invalidation.
995 static void __dmar_enable_qi(struct intel_iommu *iommu)
997 u32 sts;
998 unsigned long flags;
999 struct q_inval *qi = iommu->qi;
1001 qi->free_head = qi->free_tail = 0;
1002 qi->free_cnt = QI_LENGTH;
1004 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1006 /* write zero to the tail reg */
1007 writel(0, iommu->reg + DMAR_IQT_REG);
1009 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1011 iommu->gcmd |= DMA_GCMD_QIE;
1012 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1014 /* Make sure hardware complete it */
1015 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1017 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1021 * Enable Queued Invalidation interface. This is a must to support
1022 * interrupt-remapping. Also used by DMA-remapping, which replaces
1023 * register based IOTLB invalidation.
1025 int dmar_enable_qi(struct intel_iommu *iommu)
1027 struct q_inval *qi;
1028 struct page *desc_page;
1030 if (!ecap_qis(iommu->ecap))
1031 return -ENOENT;
1034 * queued invalidation is already set up and enabled.
1036 if (iommu->qi)
1037 return 0;
1039 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1040 if (!iommu->qi)
1041 return -ENOMEM;
1043 qi = iommu->qi;
1046 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1047 if (!desc_page) {
1048 kfree(qi);
1049 iommu->qi = NULL;
1050 return -ENOMEM;
1053 qi->desc = page_address(desc_page);
1055 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1056 if (!qi->desc_status) {
1057 free_page((unsigned long) qi->desc);
1058 kfree(qi);
1059 iommu->qi = NULL;
1060 return -ENOMEM;
1063 qi->free_head = qi->free_tail = 0;
1064 qi->free_cnt = QI_LENGTH;
1066 raw_spin_lock_init(&qi->q_lock);
1068 __dmar_enable_qi(iommu);
1070 return 0;
1073 /* iommu interrupt handling. Most of it is MSI-like. */
1075 enum faulttype {
1076 DMA_REMAP,
1077 INTR_REMAP,
1078 UNKNOWN,
1081 static const char *dma_remap_fault_reasons[] =
1083 "Software",
1084 "Present bit in root entry is clear",
1085 "Present bit in context entry is clear",
1086 "Invalid context entry",
1087 "Access beyond MGAW",
1088 "PTE Write access is not set",
1089 "PTE Read access is not set",
1090 "Next page table ptr is invalid",
1091 "Root table address invalid",
1092 "Context table ptr is invalid",
1093 "non-zero reserved fields in RTP",
1094 "non-zero reserved fields in CTP",
1095 "non-zero reserved fields in PTE",
1096 "PCE for translation request specifies blocking",
1099 static const char *irq_remap_fault_reasons[] =
1101 "Detected reserved fields in the decoded interrupt-remapped request",
1102 "Interrupt index exceeded the interrupt-remapping table size",
1103 "Present field in the IRTE entry is clear",
1104 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1105 "Detected reserved fields in the IRTE entry",
1106 "Blocked a compatibility format interrupt request",
1107 "Blocked an interrupt request due to source-id verification failure",
1110 #define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
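/*
 * dmar_get_fault_reason - translate a hardware fault reason code into a
 * human readable string and classify it as a DMA remapping or interrupt
 * remapping fault (codes 0x20 and up are interrupt remapping reasons).
 */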
1112 const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1114 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1115 ARRAY_SIZE(irq_remap_fault_reasons))) {
1116 *fault_type = INTR_REMAP;
1117 return irq_remap_fault_reasons[fault_reason - 0x20];
1118 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1119 *fault_type = DMA_REMAP;
1120 return dma_remap_fault_reasons[fault_reason];
1121 } else {
1122 *fault_type = UNKNOWN;
1123 return "Unknown";
1127 void dmar_msi_unmask(struct irq_data *data)
1129 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1130 unsigned long flag;
1132 /* unmask it */
1133 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1134 writel(0, iommu->reg + DMAR_FECTL_REG);
1135 /* Read a reg to force flush the post write */
1136 readl(iommu->reg + DMAR_FECTL_REG);
1137 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1140 void dmar_msi_mask(struct irq_data *data)
1142 unsigned long flag;
1143 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1145 /* mask it */
1146 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1147 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1148 /* Read a reg to force flush the post write */
1149 readl(iommu->reg + DMAR_FECTL_REG);
1150 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1153 void dmar_msi_write(int irq, struct msi_msg *msg)
1155 struct intel_iommu *iommu = irq_get_handler_data(irq);
1156 unsigned long flag;
1158 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1159 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1160 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1161 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1162 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1165 void dmar_msi_read(int irq, struct msi_msg *msg)
1167 struct intel_iommu *iommu = irq_get_handler_data(irq);
1168 unsigned long flag;
1170 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1171 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1172 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1173 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1174 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1177 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1178 u8 fault_reason, u16 source_id, unsigned long long addr)
1180 const char *reason;
1181 int fault_type;
1183 reason = dmar_get_fault_reason(fault_reason, &fault_type);
1185 if (fault_type == INTR_REMAP)
1186 pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
1187 "fault index %llx\n"
1188 "INTR-REMAP:[fault reason %02d] %s\n",
1189 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1190 PCI_FUNC(source_id & 0xFF), addr >> 48,
1191 fault_reason, reason);
1192 else
1193 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
1194 "fault addr %llx \n"
1195 "DMAR:[fault reason %02d] %s\n",
1196 (type ? "DMA Read" : "DMA Write"),
1197 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1198 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1199 return 0;
1202 #define PRIMARY_FAULT_REG_LEN (16)
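/*
 * dmar_fault - primary fault interrupt handler. Walks the fault recording
 * registers starting at the index reported in the fault status register,
 * logs and clears each recorded fault, then clears the overflow and
 * pending-fault status bits.
 */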
1203 irqreturn_t dmar_fault(int irq, void *dev_id)
1205 struct intel_iommu *iommu = dev_id;
1206 int reg, fault_index;
1207 u32 fault_status;
1208 unsigned long flag;
1210 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1211 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1212 if (fault_status)
1213 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1215 /* TBD: ignore advanced fault log currently */
1216 if (!(fault_status & DMA_FSTS_PPF))
1217 goto unlock_exit;
1219 fault_index = dma_fsts_fault_record_index(fault_status);
1220 reg = cap_fault_reg_offset(iommu->cap);
1221 while (1) {
1222 u8 fault_reason;
1223 u16 source_id;
1224 u64 guest_addr;
1225 int type;
1226 u32 data;
1228 /* highest 32 bits */
1229 data = readl(iommu->reg + reg +
1230 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1231 if (!(data & DMA_FRCD_F))
1232 break;
1234 fault_reason = dma_frcd_fault_reason(data);
1235 type = dma_frcd_type(data);
1237 data = readl(iommu->reg + reg +
1238 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1239 source_id = dma_frcd_source_id(data);
1241 guest_addr = dmar_readq(iommu->reg + reg +
1242 fault_index * PRIMARY_FAULT_REG_LEN);
1243 guest_addr = dma_frcd_page_addr(guest_addr);
1244 /* clear the fault */
1245 writel(DMA_FRCD_F, iommu->reg + reg +
1246 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1248 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1250 dmar_fault_do_one(iommu, type, fault_reason,
1251 source_id, guest_addr);
1253 fault_index++;
1254 if (fault_index >= cap_num_fault_regs(iommu->cap))
1255 fault_index = 0;
1256 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1259 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1261 unlock_exit:
1262 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1263 return IRQ_HANDLED;
1266 int dmar_set_interrupt(struct intel_iommu *iommu)
1268 int irq, ret;
1271 * Check if the fault interrupt is already initialized.
1273 if (iommu->irq)
1274 return 0;
1276 irq = create_irq();
1277 if (!irq) {
1278 pr_err("IOMMU: no free vectors\n");
1279 return -EINVAL;
1282 irq_set_handler_data(irq, iommu);
1283 iommu->irq = irq;
1285 ret = arch_setup_dmar_msi(irq);
1286 if (ret) {
1287 irq_set_handler_data(irq, NULL);
1288 iommu->irq = 0;
1289 destroy_irq(irq);
1290 return ret;
1293 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1294 if (ret)
1295 pr_err("IOMMU: can't request irq\n");
1296 return ret;
1299 int __init enable_drhd_fault_handling(void)
1301 struct dmar_drhd_unit *drhd;
1304 * Enable fault control interrupt.
1306 for_each_drhd_unit(drhd) {
1307 int ret;
1308 struct intel_iommu *iommu = drhd->iommu;
1309 u32 fault_status;
1310 ret = dmar_set_interrupt(iommu);
1312 if (ret) {
1313 pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
1314 (unsigned long long)drhd->reg_base_addr, ret);
1315 return -1;
1319 * Clear any previous faults.
1321 dmar_fault(iommu->irq, iommu);
1322 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1323 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1326 return 0;
1330 * Re-enable Queued Invalidation interface.
1332 int dmar_reenable_qi(struct intel_iommu *iommu)
1334 if (!ecap_qis(iommu->ecap))
1335 return -ENOENT;
1337 if (!iommu->qi)
1338 return -ENOENT;
1341 * First disable queued invalidation.
1343 dmar_disable_qi(iommu);
1345 * Then enable queued invalidation again. Since there are no pending
1346 * invalidation requests now, it's safe to re-enable queued
1347 * invalidation.
1349 __dmar_enable_qi(iommu);
1351 return 0;
1355 * Check interrupt remapping support in DMAR table description.
1357 int __init dmar_ir_support(void)
1359 struct acpi_table_dmar *dmar;
1360 dmar = (struct acpi_table_dmar *)dmar_tbl;
1361 if (!dmar)
1362 return 0;
1363 return dmar->flags & 0x1;
1365 IOMMU_INIT_POST(detect_intel_iommu);