/* drivers/iommu/intel-iommu.c */
/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */
#define pr_fmt(fmt)     "DMAR: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <linux/numa.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "intel-pasid.h"
#define ROOT_SIZE               VTD_PAGE_SIZE
#define CONTEXT_SIZE            VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START      (0xfee00000)
#define IOAPIC_RANGE_END        (0xfeefffff)
#define IOVA_START_ADDR         (0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
                                __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
/* IO virtual address start page frame number */
#define IOVA_START_PFN          (1)

#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)

/* page table handling */
#define LEVEL_STRIDE            (9)
#define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES     (~0xFFFUL)

static inline int agaw_to_level(int agaw)
{
        return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
        return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
        return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
        return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
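/*
 * Worked example (illustrative): a 48-bit address width gives
 * width_to_agaw(48) = DIV_ROUND_UP(48 - 30, 9) = 2, agaw_to_level(2) = 4
 * (a 4-level page table) and agaw_to_width(2) = min(30 + 2 * 9, 64) = 48.
 * At level 2, level_to_offset_bits(2) = 9, so pfn_level_offset(pfn, 2)
 * is (pfn >> 9) & 0x1ff, i.e. the second-level table index of a DMA pfn.
 */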
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
        return page_to_dma_pfn(virt_to_page(p));
}
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
int intel_iommu_tboot_noforce;
static int no_platform_optin;

#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
        if (!(re->lo & 1))
                return 0;

        return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
        if (!(re->hi & 1))
                return 0;

        return re->hi & VTD_PAGE_MASK;
}

static inline void context_clear_pasid_enable(struct context_entry *context)
{
        context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
        return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
        context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
        return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
        return (context->lo & 1);
}

bool context_present(struct context_entry *context)
{
        return context_pasid_enabled(context) ?
             __context_present(context) :
             __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
        context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
        context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
                                                unsigned long value)
{
        context->lo &= (((u64)-1) << 4) | 3;
        context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
                                            unsigned long value)
{
        context->lo &= ~VTD_PAGE_MASK;
        context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
                                             unsigned long value)
{
        context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
                                         unsigned long value)
{
        context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
        return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
        context->lo = 0;
        context->hi = 0;
}
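/*
 * Summary of the context entry bits touched by the helpers above (derived
 * from the shifts and masks they use): lo bit 0 is Present, lo bits 3:2 the
 * translation type, lo bits 63:12 the context address root, and lo bit 11
 * the legacy extended-context PASID enable; hi bits 2:0 hold the address
 * width, hi bits 23:8 the domain id, and hi bit 3 is a software-only
 * "copied" marker for entries inherited from a previous kernel (kdump).
 */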
/*
 * This domain is a statically identity mapping domain.
 *      1. This domain creates a static 1:1 mapping to all usable memory.
 *      2. It maps to each iommu if successful.
 *      3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)

#define for_each_domain_iommu(idx, domain)                      \
        for (idx = 0; idx < g_num_of_iommus; idx++)             \
                if (domain->iommu_refcnt[idx])

struct dmar_rmrr_unit {
        struct list_head list;          /* list of rmrr units   */
        struct acpi_dmar_header *hdr;   /* ACPI header          */
        u64     base_address;           /* reserved base address */
        u64     end_address;            /* reserved end address */
        struct dmar_dev_scope *devices; /* target devices */
        int     devices_cnt;            /* target device count */
        struct iommu_resv_region *resv; /* reserved region handle */
};

struct dmar_atsr_unit {
        struct list_head list;          /* list of ATSR units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        struct dmar_dev_scope *devices; /* target devices */
        int devices_cnt;                /* target device count */
        u8 include_all:1;               /* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
        list_for_each_entry(rmrr, &dmar_rmrr_units, list)

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
                                 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_sm;
static int iommu_identity_mapping;

#define IDENTMAP_ALL            1
#define IDENTMAP_GFX            2
#define IDENTMAP_AZALIA         4

#define sm_supported(iommu)     (intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu)  (sm_supported(iommu) &&                 \
                                 ecap_pasid((iommu)->ecap))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
386 * Iterate over elements in device_domain_list and call the specified
387 * callback @fn against each element.
389 int for_each_device_domain(int (*fn)(struct device_domain_info *info,
390 void *data), void *data)
392 int ret = 0;
393 unsigned long flags;
394 struct device_domain_info *info;
396 spin_lock_irqsave(&device_domain_lock, flags);
397 list_for_each_entry(info, &device_domain_list, global) {
398 ret = fn(info, data);
399 if (ret) {
400 spin_unlock_irqrestore(&device_domain_lock, flags);
401 return ret;
404 spin_unlock_irqrestore(&device_domain_lock, flags);
406 return 0;
409 const struct iommu_ops intel_iommu_ops;
411 static bool translation_pre_enabled(struct intel_iommu *iommu)
413 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
416 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
418 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
421 static void init_translation_status(struct intel_iommu *iommu)
423 u32 gsts;
425 gsts = readl(iommu->reg + DMAR_GSTS_REG);
426 if (gsts & DMA_GSTS_TES)
427 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
430 /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
431 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
433 return container_of(dom, struct dmar_domain, domain);
436 static int __init intel_iommu_setup(char *str)
438 if (!str)
439 return -EINVAL;
440 while (*str) {
441 if (!strncmp(str, "on", 2)) {
442 dmar_disabled = 0;
443 pr_info("IOMMU enabled\n");
444 } else if (!strncmp(str, "off", 3)) {
445 dmar_disabled = 1;
446 no_platform_optin = 1;
447 pr_info("IOMMU disabled\n");
448 } else if (!strncmp(str, "igfx_off", 8)) {
449 dmar_map_gfx = 0;
450 pr_info("Disable GFX device mapping\n");
451 } else if (!strncmp(str, "forcedac", 8)) {
452 pr_info("Forcing DAC for PCI devices\n");
453 dmar_forcedac = 1;
454 } else if (!strncmp(str, "strict", 6)) {
455 pr_info("Disable batched IOTLB flush\n");
456 intel_iommu_strict = 1;
457 } else if (!strncmp(str, "sp_off", 6)) {
458 pr_info("Disable supported super page\n");
459 intel_iommu_superpage = 0;
460 } else if (!strncmp(str, "sm_on", 5)) {
461 pr_info("Intel-IOMMU: scalable mode supported\n");
462 intel_iommu_sm = 1;
463 } else if (!strncmp(str, "tboot_noforce", 13)) {
464 printk(KERN_INFO
465 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
466 intel_iommu_tboot_noforce = 1;
469 str += strcspn(str, ",");
470 while (*str == ',')
471 str++;
473 return 0;
475 __setup("intel_iommu=", intel_iommu_setup);
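/*
 * Example usage (illustrative): booting with "intel_iommu=on,sm_on,strict"
 * is parsed above as three comma-separated tokens, enabling the IOMMU,
 * scalable mode and strict (unbatched) IOTLB flushing respectively.
 */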
477 static struct kmem_cache *iommu_domain_cache;
478 static struct kmem_cache *iommu_devinfo_cache;
480 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
482 struct dmar_domain **domains;
483 int idx = did >> 8;
485 domains = iommu->domains[idx];
486 if (!domains)
487 return NULL;
489 return domains[did & 0xff];
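/*
 * Illustrative lookup: domain id 0x1234 resolves to iommu->domains[0x12][0x34],
 * i.e. the domain-id space is split into 256-entry second-level arrays that
 * are only allocated on demand (see set_iommu_domain() below).
 */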
492 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
493 struct dmar_domain *domain)
495 struct dmar_domain **domains;
496 int idx = did >> 8;
498 if (!iommu->domains[idx]) {
499 size_t size = 256 * sizeof(struct dmar_domain *);
500 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
503 domains = iommu->domains[idx];
504 if (WARN_ON(!domains))
505 return;
506 else
507 domains[did & 0xff] = domain;
510 void *alloc_pgtable_page(int node)
512 struct page *page;
513 void *vaddr = NULL;
515 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
516 if (page)
517 vaddr = page_address(page);
518 return vaddr;
521 void free_pgtable_page(void *vaddr)
523 free_page((unsigned long)vaddr);
526 static inline void *alloc_domain_mem(void)
528 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
531 static void free_domain_mem(void *vaddr)
533 kmem_cache_free(iommu_domain_cache, vaddr);
536 static inline void * alloc_devinfo_mem(void)
538 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
541 static inline void free_devinfo_mem(void *vaddr)
543 kmem_cache_free(iommu_devinfo_cache, vaddr);
546 static inline int domain_type_is_vm(struct dmar_domain *domain)
548 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
551 static inline int domain_type_is_si(struct dmar_domain *domain)
553 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
556 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
558 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
559 DOMAIN_FLAG_STATIC_IDENTITY);
562 static inline int domain_pfn_supported(struct dmar_domain *domain,
563 unsigned long pfn)
565 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
567 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
570 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
572 unsigned long sagaw;
573 int agaw = -1;
575 sagaw = cap_sagaw(iommu->cap);
576 for (agaw = width_to_agaw(max_gaw);
577 agaw >= 0; agaw--) {
578 if (test_bit(agaw, &sagaw))
579 break;
582 return agaw;
586 * Calculate max SAGAW for each iommu.
588 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
590 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
594 * Calculate agaw for each iommu.
595 * "SAGAW" may be different across iommus; use a default agaw, and
596 * fall back to a smaller supported agaw for iommus that don't support the default.
598 int iommu_calculate_agaw(struct intel_iommu *iommu)
600 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
603 /* This function only returns a single iommu in a domain */
604 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
606 int iommu_id;
608 /* si_domain and vm domain should not get here. */
609 BUG_ON(domain_type_is_vm_or_si(domain));
610 for_each_domain_iommu(iommu_id, domain)
611 break;
613 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
614 return NULL;
616 return g_iommus[iommu_id];
619 static void domain_update_iommu_coherency(struct dmar_domain *domain)
621 struct dmar_drhd_unit *drhd;
622 struct intel_iommu *iommu;
623 bool found = false;
624 int i;
626 domain->iommu_coherency = 1;
628 for_each_domain_iommu(i, domain) {
629 found = true;
630 if (!ecap_coherent(g_iommus[i]->ecap)) {
631 domain->iommu_coherency = 0;
632 break;
635 if (found)
636 return;
638 /* No hardware attached; use lowest common denominator */
639 rcu_read_lock();
640 for_each_active_iommu(iommu, drhd) {
641 if (!ecap_coherent(iommu->ecap)) {
642 domain->iommu_coherency = 0;
643 break;
646 rcu_read_unlock();
649 static int domain_update_iommu_snooping(struct intel_iommu *skip)
651 struct dmar_drhd_unit *drhd;
652 struct intel_iommu *iommu;
653 int ret = 1;
655 rcu_read_lock();
656 for_each_active_iommu(iommu, drhd) {
657 if (iommu != skip) {
658 if (!ecap_sc_support(iommu->ecap)) {
659 ret = 0;
660 break;
664 rcu_read_unlock();
666 return ret;
669 static int domain_update_iommu_superpage(struct intel_iommu *skip)
671 struct dmar_drhd_unit *drhd;
672 struct intel_iommu *iommu;
673 int mask = 0xf;
675 if (!intel_iommu_superpage) {
676 return 0;
679 /* set iommu_superpage to the smallest common denominator */
680 rcu_read_lock();
681 for_each_active_iommu(iommu, drhd) {
682 if (iommu != skip) {
683 mask &= cap_super_page_val(iommu->cap);
684 if (!mask)
685 break;
688 rcu_read_unlock();
690 return fls(mask);
693 /* Some capabilities may be different across iommus */
694 static void domain_update_iommu_cap(struct dmar_domain *domain)
696 domain_update_iommu_coherency(domain);
697 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
698 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
701 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
702 u8 devfn, int alloc)
704 struct root_entry *root = &iommu->root_entry[bus];
705 struct context_entry *context;
706 u64 *entry;
708 entry = &root->lo;
709 if (sm_supported(iommu)) {
710 if (devfn >= 0x80) {
711 devfn -= 0x80;
712 entry = &root->hi;
714 devfn *= 2;
716 if (*entry & 1)
717 context = phys_to_virt(*entry & VTD_PAGE_MASK);
718 else {
719 unsigned long phy_addr;
720 if (!alloc)
721 return NULL;
723 context = alloc_pgtable_page(iommu->node);
724 if (!context)
725 return NULL;
727 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
728 phy_addr = virt_to_phys((void *)context);
729 *entry = phy_addr | 1;
730 __iommu_flush_cache(iommu, entry, sizeof(*entry));
732 return &context[devfn];
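/*
 * Illustrative index math for scalable mode: devfn 0x85 falls in the upper
 * half, so the entry comes from root->hi and the index becomes
 * (0x85 - 0x80) * 2 = 0x0a; the doubling is because a scalable-mode context
 * entry occupies two legacy-sized (128-bit) context_entry slots.
 */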
735 static int iommu_dummy(struct device *dev)
737 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
740 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
742 struct dmar_drhd_unit *drhd = NULL;
743 struct intel_iommu *iommu;
744 struct device *tmp;
745 struct pci_dev *ptmp, *pdev = NULL;
746 u16 segment = 0;
747 int i;
749 if (iommu_dummy(dev))
750 return NULL;
752 if (dev_is_pci(dev)) {
753 struct pci_dev *pf_pdev;
755 pdev = to_pci_dev(dev);
757 #ifdef CONFIG_X86
758 /* VMD child devices currently cannot be handled individually */
759 if (is_vmd(pdev->bus))
760 return NULL;
761 #endif
763 /* VFs aren't listed in scope tables; we need to look up
764 * the PF instead to find the IOMMU. */
765 pf_pdev = pci_physfn(pdev);
766 dev = &pf_pdev->dev;
767 segment = pci_domain_nr(pdev->bus);
768 } else if (has_acpi_companion(dev))
769 dev = &ACPI_COMPANION(dev)->dev;
771 rcu_read_lock();
772 for_each_active_iommu(iommu, drhd) {
773 if (pdev && segment != drhd->segment)
774 continue;
776 for_each_active_dev_scope(drhd->devices,
777 drhd->devices_cnt, i, tmp) {
778 if (tmp == dev) {
779 /* For a VF use its original BDF# not that of the PF
780 * which we used for the IOMMU lookup. Strictly speaking
781 * we could do this for all PCI devices; we only need to
782 * get the BDF# from the scope table for ACPI matches. */
783 if (pdev && pdev->is_virtfn)
784 goto got_pdev;
786 *bus = drhd->devices[i].bus;
787 *devfn = drhd->devices[i].devfn;
788 goto out;
791 if (!pdev || !dev_is_pci(tmp))
792 continue;
794 ptmp = to_pci_dev(tmp);
795 if (ptmp->subordinate &&
796 ptmp->subordinate->number <= pdev->bus->number &&
797 ptmp->subordinate->busn_res.end >= pdev->bus->number)
798 goto got_pdev;
801 if (pdev && drhd->include_all) {
802 got_pdev:
803 *bus = pdev->bus->number;
804 *devfn = pdev->devfn;
805 goto out;
808 iommu = NULL;
809 out:
810 rcu_read_unlock();
812 return iommu;
815 static void domain_flush_cache(struct dmar_domain *domain,
816 void *addr, int size)
818 if (!domain->iommu_coherency)
819 clflush_cache_range(addr, size);
822 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
824 struct context_entry *context;
825 int ret = 0;
826 unsigned long flags;
828 spin_lock_irqsave(&iommu->lock, flags);
829 context = iommu_context_addr(iommu, bus, devfn, 0);
830 if (context)
831 ret = context_present(context);
832 spin_unlock_irqrestore(&iommu->lock, flags);
833 return ret;
836 static void free_context_table(struct intel_iommu *iommu)
838 int i;
839 unsigned long flags;
840 struct context_entry *context;
842 spin_lock_irqsave(&iommu->lock, flags);
843 if (!iommu->root_entry) {
844 goto out;
846 for (i = 0; i < ROOT_ENTRY_NR; i++) {
847 context = iommu_context_addr(iommu, i, 0, 0);
848 if (context)
849 free_pgtable_page(context);
851 if (!sm_supported(iommu))
852 continue;
854 context = iommu_context_addr(iommu, i, 0x80, 0);
855 if (context)
856 free_pgtable_page(context);
859 free_pgtable_page(iommu->root_entry);
860 iommu->root_entry = NULL;
861 out:
862 spin_unlock_irqrestore(&iommu->lock, flags);
865 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
866 unsigned long pfn, int *target_level)
868 struct dma_pte *parent, *pte;
869 int level = agaw_to_level(domain->agaw);
870 int offset;
872 BUG_ON(!domain->pgd);
874 if (!domain_pfn_supported(domain, pfn))
875 /* Address beyond IOMMU's addressing capabilities. */
876 return NULL;
878 parent = domain->pgd;
880 while (1) {
881 void *tmp_page;
883 offset = pfn_level_offset(pfn, level);
884 pte = &parent[offset];
885 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
886 break;
887 if (level == *target_level)
888 break;
890 if (!dma_pte_present(pte)) {
891 uint64_t pteval;
893 tmp_page = alloc_pgtable_page(domain->nid);
895 if (!tmp_page)
896 return NULL;
898 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
899 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
900 if (cmpxchg64(&pte->val, 0ULL, pteval))
901 /* Someone else set it while we were thinking; use theirs. */
902 free_pgtable_page(tmp_page);
903 else
904 domain_flush_cache(domain, pte, sizeof(*pte));
906 if (level == 1)
907 break;
909 parent = phys_to_virt(dma_pte_addr(pte));
910 level--;
913 if (!*target_level)
914 *target_level = level;
916 return pte;
920 /* return address's pte at specific level */
921 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
922 unsigned long pfn,
923 int level, int *large_page)
925 struct dma_pte *parent, *pte;
926 int total = agaw_to_level(domain->agaw);
927 int offset;
929 parent = domain->pgd;
930 while (level <= total) {
931 offset = pfn_level_offset(pfn, total);
932 pte = &parent[offset];
933 if (level == total)
934 return pte;
936 if (!dma_pte_present(pte)) {
937 *large_page = total;
938 break;
941 if (dma_pte_superpage(pte)) {
942 *large_page = total;
943 return pte;
946 parent = phys_to_virt(dma_pte_addr(pte));
947 total--;
949 return NULL;
952 /* clear last level pte; a tlb flush should follow */
953 static void dma_pte_clear_range(struct dmar_domain *domain,
954 unsigned long start_pfn,
955 unsigned long last_pfn)
957 unsigned int large_page;
958 struct dma_pte *first_pte, *pte;
960 BUG_ON(!domain_pfn_supported(domain, start_pfn));
961 BUG_ON(!domain_pfn_supported(domain, last_pfn));
962 BUG_ON(start_pfn > last_pfn);
964 /* we don't need lock here; nobody else touches the iova range */
965 do {
966 large_page = 1;
967 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
968 if (!pte) {
969 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
970 continue;
972 do {
973 dma_clear_pte(pte);
974 start_pfn += lvl_to_nr_pages(large_page);
975 pte++;
976 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
978 domain_flush_cache(domain, first_pte,
979 (void *)pte - (void *)first_pte);
981 } while (start_pfn && start_pfn <= last_pfn);
984 static void dma_pte_free_level(struct dmar_domain *domain, int level,
985 int retain_level, struct dma_pte *pte,
986 unsigned long pfn, unsigned long start_pfn,
987 unsigned long last_pfn)
989 pfn = max(start_pfn, pfn);
990 pte = &pte[pfn_level_offset(pfn, level)];
992 do {
993 unsigned long level_pfn;
994 struct dma_pte *level_pte;
996 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
997 goto next;
999 level_pfn = pfn & level_mask(level);
1000 level_pte = phys_to_virt(dma_pte_addr(pte));
1002 if (level > 2) {
1003 dma_pte_free_level(domain, level - 1, retain_level,
1004 level_pte, level_pfn, start_pfn,
1005 last_pfn);
1009 * Free the page table if we're below the level we want to
1010 * retain and the range covers the entire table.
1012 if (level < retain_level && !(start_pfn > level_pfn ||
1013 last_pfn < level_pfn + level_size(level) - 1)) {
1014 dma_clear_pte(pte);
1015 domain_flush_cache(domain, pte, sizeof(*pte));
1016 free_pgtable_page(level_pte);
1018 next:
1019 pfn += level_size(level);
1020 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1024 * clear last level (leaf) ptes and free page table pages below the
1025 * level we wish to keep intact.
1027 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1028 unsigned long start_pfn,
1029 unsigned long last_pfn,
1030 int retain_level)
1032 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1033 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1034 BUG_ON(start_pfn > last_pfn);
1036 dma_pte_clear_range(domain, start_pfn, last_pfn);
1038 /* We don't need lock here; nobody else touches the iova range */
1039 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1040 domain->pgd, 0, start_pfn, last_pfn);
1042 /* free pgd */
1043 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1044 free_pgtable_page(domain->pgd);
1045 domain->pgd = NULL;
1049 /* When a page at a given level is being unlinked from its parent, we don't
1050 need to *modify* it at all. All we need to do is make a list of all the
1051 pages which can be freed just as soon as we've flushed the IOTLB and we
1052 know the hardware page-walk will no longer touch them.
1053 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1054 be freed. */
1055 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1056 int level, struct dma_pte *pte,
1057 struct page *freelist)
1059 struct page *pg;
1061 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1062 pg->freelist = freelist;
1063 freelist = pg;
1065 if (level == 1)
1066 return freelist;
1068 pte = page_address(pg);
1069 do {
1070 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1071 freelist = dma_pte_list_pagetables(domain, level - 1,
1072 pte, freelist);
1073 pte++;
1074 } while (!first_pte_in_page(pte));
1076 return freelist;
1079 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1080 struct dma_pte *pte, unsigned long pfn,
1081 unsigned long start_pfn,
1082 unsigned long last_pfn,
1083 struct page *freelist)
1085 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1087 pfn = max(start_pfn, pfn);
1088 pte = &pte[pfn_level_offset(pfn, level)];
1090 do {
1091 unsigned long level_pfn;
1093 if (!dma_pte_present(pte))
1094 goto next;
1096 level_pfn = pfn & level_mask(level);
1098 /* If range covers entire pagetable, free it */
1099 if (start_pfn <= level_pfn &&
1100 last_pfn >= level_pfn + level_size(level) - 1) {
1101 /* These subordinate page tables are going away entirely. Don't
1102 bother to clear them; we're just going to *free* them. */
1103 if (level > 1 && !dma_pte_superpage(pte))
1104 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1106 dma_clear_pte(pte);
1107 if (!first_pte)
1108 first_pte = pte;
1109 last_pte = pte;
1110 } else if (level > 1) {
1111 /* Recurse down into a level that isn't *entirely* obsolete */
1112 freelist = dma_pte_clear_level(domain, level - 1,
1113 phys_to_virt(dma_pte_addr(pte)),
1114 level_pfn, start_pfn, last_pfn,
1115 freelist);
1117 next:
1118 pfn += level_size(level);
1119 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1121 if (first_pte)
1122 domain_flush_cache(domain, first_pte,
1123 (void *)++last_pte - (void *)first_pte);
1125 return freelist;
1128 /* We can't just free the pages because the IOMMU may still be walking
1129 the page tables, and may have cached the intermediate levels. The
1130 pages can only be freed after the IOTLB flush has been done. */
1131 static struct page *domain_unmap(struct dmar_domain *domain,
1132 unsigned long start_pfn,
1133 unsigned long last_pfn)
1135 struct page *freelist;
1137 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1138 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1139 BUG_ON(start_pfn > last_pfn);
1141 /* we don't need lock here; nobody else touches the iova range */
1142 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1143 domain->pgd, 0, start_pfn, last_pfn, NULL);
1145 /* free pgd */
1146 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1147 struct page *pgd_page = virt_to_page(domain->pgd);
1148 pgd_page->freelist = freelist;
1149 freelist = pgd_page;
1151 domain->pgd = NULL;
1154 return freelist;
1157 static void dma_free_pagelist(struct page *freelist)
1159 struct page *pg;
1161 while ((pg = freelist)) {
1162 freelist = pg->freelist;
1163 free_pgtable_page(page_address(pg));
1167 static void iova_entry_free(unsigned long data)
1169 struct page *freelist = (struct page *)data;
1171 dma_free_pagelist(freelist);
1174 /* iommu handling */
1175 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1177 struct root_entry *root;
1178 unsigned long flags;
1180 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1181 if (!root) {
1182 pr_err("Allocating root entry for %s failed\n",
1183 iommu->name);
1184 return -ENOMEM;
1187 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1189 spin_lock_irqsave(&iommu->lock, flags);
1190 iommu->root_entry = root;
1191 spin_unlock_irqrestore(&iommu->lock, flags);
1193 return 0;
1196 static void iommu_set_root_entry(struct intel_iommu *iommu)
1198 u64 addr;
1199 u32 sts;
1200 unsigned long flag;
1202 addr = virt_to_phys(iommu->root_entry);
1203 if (sm_supported(iommu))
1204 addr |= DMA_RTADDR_SMT;
1206 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1207 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1209 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1211 /* Make sure hardware completes it */
1212 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1213 readl, (sts & DMA_GSTS_RTPS), sts);
1215 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1218 void iommu_flush_write_buffer(struct intel_iommu *iommu)
1220 u32 val;
1221 unsigned long flag;
1223 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1224 return;
1226 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1227 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1229 /* Make sure hardware completes it */
1230 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1231 readl, (!(val & DMA_GSTS_WBFS)), val);
1233 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1236 /* return value determines if we need a write buffer flush */
1237 static void __iommu_flush_context(struct intel_iommu *iommu,
1238 u16 did, u16 source_id, u8 function_mask,
1239 u64 type)
1241 u64 val = 0;
1242 unsigned long flag;
1244 switch (type) {
1245 case DMA_CCMD_GLOBAL_INVL:
1246 val = DMA_CCMD_GLOBAL_INVL;
1247 break;
1248 case DMA_CCMD_DOMAIN_INVL:
1249 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1250 break;
1251 case DMA_CCMD_DEVICE_INVL:
1252 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1253 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1254 break;
1255 default:
1256 BUG();
1258 val |= DMA_CCMD_ICC;
1260 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1261 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1263 /* Make sure hardware completes it */
1264 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1265 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1267 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1270 /* return value determines if we need a write buffer flush */
1271 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1272 u64 addr, unsigned int size_order, u64 type)
1274 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1275 u64 val = 0, val_iva = 0;
1276 unsigned long flag;
1278 switch (type) {
1279 case DMA_TLB_GLOBAL_FLUSH:
1280 /* global flush doesn't need to set IVA_REG */
1281 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1282 break;
1283 case DMA_TLB_DSI_FLUSH:
1284 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1285 break;
1286 case DMA_TLB_PSI_FLUSH:
1287 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1288 /* IH bit is passed in as part of address */
1289 val_iva = size_order | addr;
1290 break;
1291 default:
1292 BUG();
1294 /* Note: set drain read/write */
1295 #if 0
1297 * This is probably just to be extra safe. It looks like we can
1298 * ignore it without any impact.
1300 if (cap_read_drain(iommu->cap))
1301 val |= DMA_TLB_READ_DRAIN;
1302 #endif
1303 if (cap_write_drain(iommu->cap))
1304 val |= DMA_TLB_WRITE_DRAIN;
1306 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1307 /* Note: Only uses first TLB reg currently */
1308 if (val_iva)
1309 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1310 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1312 /* Make sure hardware completes it */
1313 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1314 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1316 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1318 /* check IOTLB invalidation granularity */
1319 if (DMA_TLB_IAIG(val) == 0)
1320 pr_err("Flush IOTLB failed\n");
1321 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1322 pr_debug("TLB flush request %Lx, actual %Lx\n",
1323 (unsigned long long)DMA_TLB_IIRG(type),
1324 (unsigned long long)DMA_TLB_IAIG(val));
1327 static struct device_domain_info *
1328 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1329 u8 bus, u8 devfn)
1331 struct device_domain_info *info;
1333 assert_spin_locked(&device_domain_lock);
1335 if (!iommu->qi)
1336 return NULL;
1338 list_for_each_entry(info, &domain->devices, link)
1339 if (info->iommu == iommu && info->bus == bus &&
1340 info->devfn == devfn) {
1341 if (info->ats_supported && info->dev)
1342 return info;
1343 break;
1346 return NULL;
1349 static void domain_update_iotlb(struct dmar_domain *domain)
1351 struct device_domain_info *info;
1352 bool has_iotlb_device = false;
1354 assert_spin_locked(&device_domain_lock);
1356 list_for_each_entry(info, &domain->devices, link) {
1357 struct pci_dev *pdev;
1359 if (!info->dev || !dev_is_pci(info->dev))
1360 continue;
1362 pdev = to_pci_dev(info->dev);
1363 if (pdev->ats_enabled) {
1364 has_iotlb_device = true;
1365 break;
1369 domain->has_iotlb_device = has_iotlb_device;
1372 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1374 struct pci_dev *pdev;
1376 assert_spin_locked(&device_domain_lock);
1378 if (!info || !dev_is_pci(info->dev))
1379 return;
1381 pdev = to_pci_dev(info->dev);
1382 /* For IOMMU that supports device IOTLB throttling (DIT), we assign
1383 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
1384 * queue depth at PF level. If DIT is not set, PFSID will be treated as
1385 * reserved, which should be set to 0.
1387 if (!ecap_dit(info->iommu->ecap))
1388 info->pfsid = 0;
1389 else {
1390 struct pci_dev *pf_pdev;
1392 /* pdev will be returned if device is not a vf */
1393 pf_pdev = pci_physfn(pdev);
1394 info->pfsid = pci_dev_id(pf_pdev);
1397 #ifdef CONFIG_INTEL_IOMMU_SVM
1398 /* The PCIe spec, in its wisdom, declares that the behaviour of
1399 the device if you enable PASID support after ATS support is
1400 undefined. So always enable PASID support on devices which
1401 have it, even if we can't yet know if we're ever going to
1402 use it. */
1403 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1404 info->pasid_enabled = 1;
1406 if (info->pri_supported &&
1407 (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
1408 !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1409 info->pri_enabled = 1;
1410 #endif
1411 if (!pdev->untrusted && info->ats_supported &&
1412 pci_ats_page_aligned(pdev) &&
1413 !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1414 info->ats_enabled = 1;
1415 domain_update_iotlb(info->domain);
1416 info->ats_qdep = pci_ats_queue_depth(pdev);
1420 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1422 struct pci_dev *pdev;
1424 assert_spin_locked(&device_domain_lock);
1426 if (!dev_is_pci(info->dev))
1427 return;
1429 pdev = to_pci_dev(info->dev);
1431 if (info->ats_enabled) {
1432 pci_disable_ats(pdev);
1433 info->ats_enabled = 0;
1434 domain_update_iotlb(info->domain);
1436 #ifdef CONFIG_INTEL_IOMMU_SVM
1437 if (info->pri_enabled) {
1438 pci_disable_pri(pdev);
1439 info->pri_enabled = 0;
1441 if (info->pasid_enabled) {
1442 pci_disable_pasid(pdev);
1443 info->pasid_enabled = 0;
1445 #endif
1448 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1449 u64 addr, unsigned mask)
1451 u16 sid, qdep;
1452 unsigned long flags;
1453 struct device_domain_info *info;
1455 if (!domain->has_iotlb_device)
1456 return;
1458 spin_lock_irqsave(&device_domain_lock, flags);
1459 list_for_each_entry(info, &domain->devices, link) {
1460 if (!info->ats_enabled)
1461 continue;
1463 sid = info->bus << 8 | info->devfn;
1464 qdep = info->ats_qdep;
1465 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1466 qdep, addr, mask);
1468 spin_unlock_irqrestore(&device_domain_lock, flags);
1471 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1472 struct dmar_domain *domain,
1473 unsigned long pfn, unsigned int pages,
1474 int ih, int map)
1476 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1477 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1478 u16 did = domain->iommu_did[iommu->seq_id];
1480 BUG_ON(pages == 0);
1482 if (ih)
1483 ih = 1 << 6;
1485 * Fallback to domain selective flush if no PSI support or the size is
1486 * too big.
1487 * PSI requires page size to be 2 ^ x, and the base address is naturally
1488 * aligned to the size
1490 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1491 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1492 DMA_TLB_DSI_FLUSH);
1493 else
1494 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1495 DMA_TLB_PSI_FLUSH);
1498 * In caching mode, changes of pages from non-present to present require
1499 * flush. However, device IOTLB doesn't need to be flushed in this case.
1501 if (!cap_caching_mode(iommu->cap) || !map)
1502 iommu_flush_dev_iotlb(domain, addr, mask);
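/*
 * Illustrative mask computation: flushing pages = 3 gives
 * mask = ilog2(__roundup_pow_of_two(3)) = 2, i.e. a page-selective
 * invalidation covering 2^2 = 4 pages; if that exceeds
 * cap_max_amask_val(), the code above falls back to a domain-selective
 * flush instead.
 */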
1505 /* Notification for newly created mappings */
1506 static inline void __mapping_notify_one(struct intel_iommu *iommu,
1507 struct dmar_domain *domain,
1508 unsigned long pfn, unsigned int pages)
1510 /* It's a non-present to present mapping. Only flush if caching mode */
1511 if (cap_caching_mode(iommu->cap))
1512 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1513 else
1514 iommu_flush_write_buffer(iommu);
1517 static void iommu_flush_iova(struct iova_domain *iovad)
1519 struct dmar_domain *domain;
1520 int idx;
1522 domain = container_of(iovad, struct dmar_domain, iovad);
1524 for_each_domain_iommu(idx, domain) {
1525 struct intel_iommu *iommu = g_iommus[idx];
1526 u16 did = domain->iommu_did[iommu->seq_id];
1528 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1530 if (!cap_caching_mode(iommu->cap))
1531 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1532 0, MAX_AGAW_PFN_WIDTH);
1536 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1538 u32 pmen;
1539 unsigned long flags;
1541 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1542 return;
1544 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1545 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1546 pmen &= ~DMA_PMEN_EPM;
1547 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1549 /* wait for the protected region status bit to clear */
1550 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1551 readl, !(pmen & DMA_PMEN_PRS), pmen);
1553 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1556 static void iommu_enable_translation(struct intel_iommu *iommu)
1558 u32 sts;
1559 unsigned long flags;
1561 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1562 iommu->gcmd |= DMA_GCMD_TE;
1563 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1565 /* Make sure hardware completes it */
1566 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1567 readl, (sts & DMA_GSTS_TES), sts);
1569 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1572 static void iommu_disable_translation(struct intel_iommu *iommu)
1574 u32 sts;
1575 unsigned long flag;
1577 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1578 iommu->gcmd &= ~DMA_GCMD_TE;
1579 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1581 /* Make sure hardware completes it */
1582 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1583 readl, (!(sts & DMA_GSTS_TES)), sts);
1585 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1589 static int iommu_init_domains(struct intel_iommu *iommu)
1591 u32 ndomains, nlongs;
1592 size_t size;
1594 ndomains = cap_ndoms(iommu->cap);
1595 pr_debug("%s: Number of Domains supported <%d>\n",
1596 iommu->name, ndomains);
1597 nlongs = BITS_TO_LONGS(ndomains);
1599 spin_lock_init(&iommu->lock);
1601 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1602 if (!iommu->domain_ids) {
1603 pr_err("%s: Allocating domain id array failed\n",
1604 iommu->name);
1605 return -ENOMEM;
1608 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1609 iommu->domains = kzalloc(size, GFP_KERNEL);
1611 if (iommu->domains) {
1612 size = 256 * sizeof(struct dmar_domain *);
1613 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1616 if (!iommu->domains || !iommu->domains[0]) {
1617 pr_err("%s: Allocating domain array failed\n",
1618 iommu->name);
1619 kfree(iommu->domain_ids);
1620 kfree(iommu->domains);
1621 iommu->domain_ids = NULL;
1622 iommu->domains = NULL;
1623 return -ENOMEM;
1629 * If Caching mode is set, then invalid translations are tagged
1630 * with domain-id 0, hence we need to pre-allocate it. We also
1631 * use domain-id 0 as a marker for non-allocated domain-id, so
1632 * make sure it is not used for a real domain.
1634 set_bit(0, iommu->domain_ids);
1637 * VT-d spec rev 3.0 (section 6.2.3.1) requires that each pasid
1638 * entry for first-level or pass-through translation modes should
1639 * be programmed with a domain id different from those used for
1640 * second-level or nested translation. We reserve a domain id for
1641 * this purpose.
1643 if (sm_supported(iommu))
1644 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
1646 return 0;
1649 static void disable_dmar_iommu(struct intel_iommu *iommu)
1651 struct device_domain_info *info, *tmp;
1652 unsigned long flags;
1654 if (!iommu->domains || !iommu->domain_ids)
1655 return;
1657 again:
1658 spin_lock_irqsave(&device_domain_lock, flags);
1659 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1660 struct dmar_domain *domain;
1662 if (info->iommu != iommu)
1663 continue;
1665 if (!info->dev || !info->domain)
1666 continue;
1668 domain = info->domain;
1670 __dmar_remove_one_dev_info(info);
1672 if (!domain_type_is_vm_or_si(domain)) {
1674 * The domain_exit() function can't be called under
1675 * device_domain_lock, as it takes this lock itself.
1676 * So release the lock here and re-run the loop
1677 * afterwards.
1679 spin_unlock_irqrestore(&device_domain_lock, flags);
1680 domain_exit(domain);
1681 goto again;
1684 spin_unlock_irqrestore(&device_domain_lock, flags);
1686 if (iommu->gcmd & DMA_GCMD_TE)
1687 iommu_disable_translation(iommu);
1690 static void free_dmar_iommu(struct intel_iommu *iommu)
1692 if ((iommu->domains) && (iommu->domain_ids)) {
1693 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1694 int i;
1696 for (i = 0; i < elems; i++)
1697 kfree(iommu->domains[i]);
1698 kfree(iommu->domains);
1699 kfree(iommu->domain_ids);
1700 iommu->domains = NULL;
1701 iommu->domain_ids = NULL;
1704 g_iommus[iommu->seq_id] = NULL;
1706 /* free context mapping */
1707 free_context_table(iommu);
1709 #ifdef CONFIG_INTEL_IOMMU_SVM
1710 if (pasid_supported(iommu)) {
1711 if (ecap_prs(iommu->ecap))
1712 intel_svm_finish_prq(iommu);
1714 #endif
1717 static struct dmar_domain *alloc_domain(int flags)
1719 struct dmar_domain *domain;
1721 domain = alloc_domain_mem();
1722 if (!domain)
1723 return NULL;
1725 memset(domain, 0, sizeof(*domain));
1726 domain->nid = NUMA_NO_NODE;
1727 domain->flags = flags;
1728 domain->has_iotlb_device = false;
1729 INIT_LIST_HEAD(&domain->devices);
1731 return domain;
1734 /* Must be called with iommu->lock */
1735 static int domain_attach_iommu(struct dmar_domain *domain,
1736 struct intel_iommu *iommu)
1738 unsigned long ndomains;
1739 int num;
1741 assert_spin_locked(&device_domain_lock);
1742 assert_spin_locked(&iommu->lock);
1744 domain->iommu_refcnt[iommu->seq_id] += 1;
1745 domain->iommu_count += 1;
1746 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1747 ndomains = cap_ndoms(iommu->cap);
1748 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1750 if (num >= ndomains) {
1751 pr_err("%s: No free domain ids\n", iommu->name);
1752 domain->iommu_refcnt[iommu->seq_id] -= 1;
1753 domain->iommu_count -= 1;
1754 return -ENOSPC;
1757 set_bit(num, iommu->domain_ids);
1758 set_iommu_domain(iommu, num, domain);
1760 domain->iommu_did[iommu->seq_id] = num;
1761 domain->nid = iommu->node;
1763 domain_update_iommu_cap(domain);
1766 return 0;
1769 static int domain_detach_iommu(struct dmar_domain *domain,
1770 struct intel_iommu *iommu)
1772 int num, count;
1774 assert_spin_locked(&device_domain_lock);
1775 assert_spin_locked(&iommu->lock);
1777 domain->iommu_refcnt[iommu->seq_id] -= 1;
1778 count = --domain->iommu_count;
1779 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1780 num = domain->iommu_did[iommu->seq_id];
1781 clear_bit(num, iommu->domain_ids);
1782 set_iommu_domain(iommu, num, NULL);
1784 domain_update_iommu_cap(domain);
1785 domain->iommu_did[iommu->seq_id] = 0;
1788 return count;
1791 static struct iova_domain reserved_iova_list;
1792 static struct lock_class_key reserved_rbtree_key;
1794 static int dmar_init_reserved_ranges(void)
1796 struct pci_dev *pdev = NULL;
1797 struct iova *iova;
1798 int i;
1800 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
1802 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1803 &reserved_rbtree_key);
1805 /* IOAPIC ranges shouldn't be accessed by DMA */
1806 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1807 IOVA_PFN(IOAPIC_RANGE_END));
1808 if (!iova) {
1809 pr_err("Reserve IOAPIC range failed\n");
1810 return -ENODEV;
1813 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1814 for_each_pci_dev(pdev) {
1815 struct resource *r;
1817 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1818 r = &pdev->resource[i];
1819 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1820 continue;
1821 iova = reserve_iova(&reserved_iova_list,
1822 IOVA_PFN(r->start),
1823 IOVA_PFN(r->end));
1824 if (!iova) {
1825 pci_err(pdev, "Reserve iova for %pR failed\n", r);
1826 return -ENODEV;
1830 return 0;
1833 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1835 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1838 static inline int guestwidth_to_adjustwidth(int gaw)
1840 int agaw;
1841 int r = (gaw - 12) % 9;
1843 if (r == 0)
1844 agaw = gaw;
1845 else
1846 agaw = gaw + 9 - r;
1847 if (agaw > 64)
1848 agaw = 64;
1849 return agaw;
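/*
 * Worked example (illustrative): guestwidth_to_adjustwidth(46) computes
 * r = (46 - 12) % 9 = 7, so agaw = 46 + 9 - 7 = 48, rounding the guest
 * width up to the next value that maps onto a whole number of 9-bit
 * page-table levels; 48 is already aligned and is returned unchanged.
 */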
1852 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1853 int guest_width)
1855 int adjust_width, agaw;
1856 unsigned long sagaw;
1857 int err;
1859 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
1861 err = init_iova_flush_queue(&domain->iovad,
1862 iommu_flush_iova, iova_entry_free);
1863 if (err)
1864 return err;
1866 domain_reserve_special_ranges(domain);
1868 /* calculate AGAW */
1869 if (guest_width > cap_mgaw(iommu->cap))
1870 guest_width = cap_mgaw(iommu->cap);
1871 domain->gaw = guest_width;
1872 adjust_width = guestwidth_to_adjustwidth(guest_width);
1873 agaw = width_to_agaw(adjust_width);
1874 sagaw = cap_sagaw(iommu->cap);
1875 if (!test_bit(agaw, &sagaw)) {
1876 /* hardware doesn't support it, choose a bigger one */
1877 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1878 agaw = find_next_bit(&sagaw, 5, agaw);
1879 if (agaw >= 5)
1880 return -ENODEV;
1882 domain->agaw = agaw;
1884 if (ecap_coherent(iommu->ecap))
1885 domain->iommu_coherency = 1;
1886 else
1887 domain->iommu_coherency = 0;
1889 if (ecap_sc_support(iommu->ecap))
1890 domain->iommu_snooping = 1;
1891 else
1892 domain->iommu_snooping = 0;
1894 if (intel_iommu_superpage)
1895 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1896 else
1897 domain->iommu_superpage = 0;
1899 domain->nid = iommu->node;
1901 /* always allocate the top pgd */
1902 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1903 if (!domain->pgd)
1904 return -ENOMEM;
1905 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1906 return 0;
1909 static void domain_exit(struct dmar_domain *domain)
1911 struct page *freelist;
1913 /* Remove associated devices and clear attached or cached domains */
1914 rcu_read_lock();
1915 domain_remove_dev_info(domain);
1916 rcu_read_unlock();
1918 /* destroy iovas */
1919 put_iova_domain(&domain->iovad);
1921 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1923 dma_free_pagelist(freelist);
1925 free_domain_mem(domain);
1929 * Get the PASID directory size for scalable mode context entry.
1930 * Value of X in the PDTS field of a scalable mode context entry
1931 * indicates PASID directory with 2^(X + 7) entries.
1933 static inline unsigned long context_get_sm_pds(struct pasid_table *table)
1935 int pds, max_pde;
1937 max_pde = table->max_pasid >> PASID_PDE_SHIFT;
1938 pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
1939 if (pds < 7)
1940 return 0;
1942 return pds - 7;
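/*
 * Worked example (illustrative, assuming PASID_PDE_SHIFT is 6): with
 * table->max_pasid = 1 << 20, max_pde = 1 << 14, so pds = 14 and the
 * function returns 7, which the PDTS encoding turns into a directory of
 * 2^(7 + 7) = 16384 entries -- enough for 16384 * 64 = 2^20 PASIDs.
 */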
1946 * Set the RID_PASID field of a scalable mode context entry. The
1947 * IOMMU hardware will use the PASID value set in this field for
1948 * DMA translations of DMA requests without PASID.
1950 static inline void
1951 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
1953 context->hi |= pasid & ((1 << 20) - 1);
1954 context->hi |= (1 << 20);
1958 * Set the DTE(Device-TLB Enable) field of a scalable mode context
1959 * entry.
1961 static inline void context_set_sm_dte(struct context_entry *context)
1963 context->lo |= (1 << 2);
1967 * Set the PRE(Page Request Enable) field of a scalable mode context
1968 * entry.
1970 static inline void context_set_sm_pre(struct context_entry *context)
1972 context->lo |= (1 << 4);
1975 /* Convert value to context PASID directory size field coding. */
1976 #define context_pdts(pds) (((pds) & 0x7) << 9)
1978 static int domain_context_mapping_one(struct dmar_domain *domain,
1979 struct intel_iommu *iommu,
1980 struct pasid_table *table,
1981 u8 bus, u8 devfn)
1983 u16 did = domain->iommu_did[iommu->seq_id];
1984 int translation = CONTEXT_TT_MULTI_LEVEL;
1985 struct device_domain_info *info = NULL;
1986 struct context_entry *context;
1987 unsigned long flags;
1988 int ret;
1990 WARN_ON(did == 0);
1992 if (hw_pass_through && domain_type_is_si(domain))
1993 translation = CONTEXT_TT_PASS_THROUGH;
1995 pr_debug("Set context mapping for %02x:%02x.%d\n",
1996 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1998 BUG_ON(!domain->pgd);
2000 spin_lock_irqsave(&device_domain_lock, flags);
2001 spin_lock(&iommu->lock);
2003 ret = -ENOMEM;
2004 context = iommu_context_addr(iommu, bus, devfn, 1);
2005 if (!context)
2006 goto out_unlock;
2008 ret = 0;
2009 if (context_present(context))
2010 goto out_unlock;
2013 * For kdump cases, old valid entries may be cached due to the
2014 * in-flight DMA and copied pgtable, but there is no unmapping
2015 * behaviour for them, thus we need an explicit cache flush for
2016 * the newly-mapped device. For kdump, at this point, the device
2017 * is supposed to finish reset at its driver probe stage, so no
2018 * in-flight DMA will exist, and we don't need to worry about it
2019 * hereafter.
2021 if (context_copied(context)) {
2022 u16 did_old = context_domain_id(context);
2024 if (did_old < cap_ndoms(iommu->cap)) {
2025 iommu->flush.flush_context(iommu, did_old,
2026 (((u16)bus) << 8) | devfn,
2027 DMA_CCMD_MASK_NOBIT,
2028 DMA_CCMD_DEVICE_INVL);
2029 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2030 DMA_TLB_DSI_FLUSH);
2034 context_clear_entry(context);
2036 if (sm_supported(iommu)) {
2037 unsigned long pds;
2039 WARN_ON(!table);
2041 /* Setup the PASID DIR pointer: */
2042 pds = context_get_sm_pds(table);
2043 context->lo = (u64)virt_to_phys(table->table) |
2044 context_pdts(pds);
2046 /* Setup the RID_PASID field: */
2047 context_set_sm_rid2pasid(context, PASID_RID2PASID);
2050 * Setup the Device-TLB enable bit and Page request
2051 * Enable bit:
2053 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2054 if (info && info->ats_supported)
2055 context_set_sm_dte(context);
2056 if (info && info->pri_supported)
2057 context_set_sm_pre(context);
2058 } else {
2059 struct dma_pte *pgd = domain->pgd;
2060 int agaw;
2062 context_set_domain_id(context, did);
2064 if (translation != CONTEXT_TT_PASS_THROUGH) {
2066 * Skip top levels of page tables for iommu which has
2067 * less agaw than default. Unnecessary for PT mode.
2069 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2070 ret = -ENOMEM;
2071 pgd = phys_to_virt(dma_pte_addr(pgd));
2072 if (!dma_pte_present(pgd))
2073 goto out_unlock;
2076 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2077 if (info && info->ats_supported)
2078 translation = CONTEXT_TT_DEV_IOTLB;
2079 else
2080 translation = CONTEXT_TT_MULTI_LEVEL;
2082 context_set_address_root(context, virt_to_phys(pgd));
2083 context_set_address_width(context, agaw);
2084 } else {
2086 * In pass through mode, AW must be programmed to
2087 * indicate the largest AGAW value supported by
2088 * hardware. And ASR is ignored by hardware.
2090 context_set_address_width(context, iommu->msagaw);
2093 context_set_translation_type(context, translation);
2096 context_set_fault_enable(context);
2097 context_set_present(context);
2098 domain_flush_cache(domain, context, sizeof(*context));
2101 * It's a non-present to present mapping. If hardware doesn't cache
2102 * non-present entries we only need to flush the write-buffer. If it
2103 * _does_ cache non-present entries, then it does so in the special
2104 * domain #0, which we have to flush:
2106 if (cap_caching_mode(iommu->cap)) {
2107 iommu->flush.flush_context(iommu, 0,
2108 (((u16)bus) << 8) | devfn,
2109 DMA_CCMD_MASK_NOBIT,
2110 DMA_CCMD_DEVICE_INVL);
2111 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2112 } else {
2113 iommu_flush_write_buffer(iommu);
2115 iommu_enable_dev_iotlb(info);
2117 ret = 0;
2119 out_unlock:
2120 spin_unlock(&iommu->lock);
2121 spin_unlock_irqrestore(&device_domain_lock, flags);
2123 return ret;
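/*
 * Illustration of the source-id used in the context-cache flushes above: it
 * is the 16-bit PCI requester ID, bus number in bits 15:8 and devfn in bits
 * 7:0.  With illustrative values bus 0x1a, device 3, function 0:
 * devfn = (3 << 3) | 0 = 0x18, so the source-id is (0x1a << 8) | 0x18 = 0x1a18.
 */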
2126 struct domain_context_mapping_data {
2127 struct dmar_domain *domain;
2128 struct intel_iommu *iommu;
2129 struct pasid_table *table;
2132 static int domain_context_mapping_cb(struct pci_dev *pdev,
2133 u16 alias, void *opaque)
2135 struct domain_context_mapping_data *data = opaque;
2137 return domain_context_mapping_one(data->domain, data->iommu,
2138 data->table, PCI_BUS_NUM(alias),
2139 alias & 0xff);
2142 static int
2143 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2145 struct domain_context_mapping_data data;
2146 struct pasid_table *table;
2147 struct intel_iommu *iommu;
2148 u8 bus, devfn;
2150 iommu = device_to_iommu(dev, &bus, &devfn);
2151 if (!iommu)
2152 return -ENODEV;
2154 table = intel_pasid_get_table(dev);
2156 if (!dev_is_pci(dev))
2157 return domain_context_mapping_one(domain, iommu, table,
2158 bus, devfn);
2160 data.domain = domain;
2161 data.iommu = iommu;
2162 data.table = table;
2164 return pci_for_each_dma_alias(to_pci_dev(dev),
2165 &domain_context_mapping_cb, &data);
2168 static int domain_context_mapped_cb(struct pci_dev *pdev,
2169 u16 alias, void *opaque)
2171 struct intel_iommu *iommu = opaque;
2173 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2176 static int domain_context_mapped(struct device *dev)
2178 struct intel_iommu *iommu;
2179 u8 bus, devfn;
2181 iommu = device_to_iommu(dev, &bus, &devfn);
2182 if (!iommu)
2183 return -ENODEV;
2185 if (!dev_is_pci(dev))
2186 return device_context_mapped(iommu, bus, devfn);
2188 return !pci_for_each_dma_alias(to_pci_dev(dev),
2189 domain_context_mapped_cb, iommu);
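/*
 * Note the double negation above: pci_for_each_dma_alias() stops and returns
 * non-zero as soon as the callback returns non-zero, i.e. as soon as one
 * alias is found without a present context entry.  Inverting that result
 * makes domain_context_mapped() return true only when every DMA alias of the
 * device is mapped.
 */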
2192 /* Returns the number of VT-d pages, but aligned to MM page size */
2193 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2194 size_t size)
2196 host_addr &= ~PAGE_MASK;
2197 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
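/*
 * Worked example (assuming 4KiB MM pages): host_addr = 0x1234 and
 * size = 0x2000 give an in-page offset of 0x234, so
 * PAGE_ALIGN(0x234 + 0x2000) = 0x3000, i.e. 3 VT-d pages; one more than the
 * size alone suggests, because the buffer straddles an extra page boundary.
 */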
2200 /* Return largest possible superpage level for a given mapping */
2201 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2202 unsigned long iov_pfn,
2203 unsigned long phy_pfn,
2204 unsigned long pages)
2206 int support, level = 1;
2207 unsigned long pfnmerge;
2209 support = domain->iommu_superpage;
2211 /* To use a large page, the virtual *and* physical addresses
2212 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2213 of them will mean we have to use smaller pages. So just
2214 merge them and check both at once. */
2215 pfnmerge = iov_pfn | phy_pfn;
2217 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2218 pages >>= VTD_STRIDE_SHIFT;
2219 if (!pages)
2220 break;
2221 pfnmerge >>= VTD_STRIDE_SHIFT;
2222 level++;
2223 support--;
2225 return level;
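/*
 * Worked example: with domain->iommu_superpage >= 1, iov_pfn = 0x200 and
 * phy_pfn = 0x400 (low 9 bits clear in both, i.e. both addresses are 2MiB
 * aligned) and pages = 1024, the loop returns level 2 and the caller may use
 * 2MiB superpages.  If either PFN had any of its low 9 bits set, the first
 * alignment check would fail and level 1 (4KiB pages) would be returned.
 */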
2228 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2229 struct scatterlist *sg, unsigned long phys_pfn,
2230 unsigned long nr_pages, int prot)
2232 struct dma_pte *first_pte = NULL, *pte = NULL;
2233 phys_addr_t uninitialized_var(pteval);
2234 unsigned long sg_res = 0;
2235 unsigned int largepage_lvl = 0;
2236 unsigned long lvl_pages = 0;
2238 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2240 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2241 return -EINVAL;
2243 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2245 if (!sg) {
2246 sg_res = nr_pages;
2247 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2250 while (nr_pages > 0) {
2251 uint64_t tmp;
2253 if (!sg_res) {
2254 unsigned int pgoff = sg->offset & ~PAGE_MASK;
2256 sg_res = aligned_nrpages(sg->offset, sg->length);
2257 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
2258 sg->dma_length = sg->length;
2259 pteval = (sg_phys(sg) - pgoff) | prot;
2260 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2263 if (!pte) {
2264 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2266 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2267 if (!pte)
2268 return -ENOMEM;
2269 /* It is a large page */
2270 if (largepage_lvl > 1) {
2271 unsigned long nr_superpages, end_pfn;
2273 pteval |= DMA_PTE_LARGE_PAGE;
2274 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2276 nr_superpages = sg_res / lvl_pages;
2277 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2280 * Ensure that old small page tables are
2281 * removed to make room for superpage(s).
2282 * We're adding new large pages, so make sure
2283 * we don't remove their parent tables.
2285 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2286 largepage_lvl + 1);
2287 } else {
2288 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2292 /* We don't need a lock here; nobody else
2293 * touches the iova range
2295 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2296 if (tmp) {
2297 static int dumps = 5;
2298 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2299 iov_pfn, tmp, (unsigned long long)pteval);
2300 if (dumps) {
2301 dumps--;
2302 debug_dma_dump_mappings(NULL);
2304 WARN_ON(1);
2307 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2309 BUG_ON(nr_pages < lvl_pages);
2310 BUG_ON(sg_res < lvl_pages);
2312 nr_pages -= lvl_pages;
2313 iov_pfn += lvl_pages;
2314 phys_pfn += lvl_pages;
2315 pteval += lvl_pages * VTD_PAGE_SIZE;
2316 sg_res -= lvl_pages;
2318 /* If the next PTE would be the first in a new page, then we
2319 need to flush the cache on the entries we've just written.
2320 And then we'll need to recalculate 'pte', so clear it and
2321 let it get set again in the if (!pte) block above.
2323 If we're done (!nr_pages) we need to flush the cache too.
2325 Also if we've been setting superpages, we may need to
2326 recalculate 'pte' and switch back to smaller pages for the
2327 end of the mapping, if the trailing size is not enough to
2328 use another superpage (i.e. sg_res < lvl_pages). */
2329 pte++;
2330 if (!nr_pages || first_pte_in_page(pte) ||
2331 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2332 domain_flush_cache(domain, first_pte,
2333 (void *)pte - (void *)first_pte);
2334 pte = NULL;
2337 if (!sg_res && nr_pages)
2338 sg = sg_next(sg);
2340 return 0;
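/*
 * For reference, with the 9-bit stride used by the VT-d page tables,
 * lvl_to_nr_pages(1) == 1 (4KiB page), lvl_to_nr_pages(2) == 512 (2MiB
 * superpage) and lvl_to_nr_pages(3) == 512 * 512 (1GiB superpage), which is
 * why nr_pages and sg_res are decremented in lvl_pages-sized steps above.
 */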
2343 static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2344 struct scatterlist *sg, unsigned long phys_pfn,
2345 unsigned long nr_pages, int prot)
2347 int ret;
2348 struct intel_iommu *iommu;
2350 /* Do the real mapping first */
2351 ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
2352 if (ret)
2353 return ret;
2355 /* Notify about the new mapping */
2356 if (domain_type_is_vm(domain)) {
2357 /* VM typed domains can have more than one IOMMU */
2358 int iommu_id;
2360 for_each_domain_iommu(iommu_id, domain) {
2361 iommu = g_iommus[iommu_id];
2362 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2364 } else {
2365 /* General domains only have one IOMMU */
2366 iommu = domain_get_iommu(domain);
2367 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2370 return 0;
2373 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2374 struct scatterlist *sg, unsigned long nr_pages,
2375 int prot)
2377 return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2380 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2381 unsigned long phys_pfn, unsigned long nr_pages,
2382 int prot)
2384 return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2387 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2389 unsigned long flags;
2390 struct context_entry *context;
2391 u16 did_old;
2393 if (!iommu)
2394 return;
2396 spin_lock_irqsave(&iommu->lock, flags);
2397 context = iommu_context_addr(iommu, bus, devfn, 0);
2398 if (!context) {
2399 spin_unlock_irqrestore(&iommu->lock, flags);
2400 return;
2402 did_old = context_domain_id(context);
2403 context_clear_entry(context);
2404 __iommu_flush_cache(iommu, context, sizeof(*context));
2405 spin_unlock_irqrestore(&iommu->lock, flags);
2406 iommu->flush.flush_context(iommu,
2407 did_old,
2408 (((u16)bus) << 8) | devfn,
2409 DMA_CCMD_MASK_NOBIT,
2410 DMA_CCMD_DEVICE_INVL);
2411 iommu->flush.flush_iotlb(iommu,
2412 did_old,
2415 DMA_TLB_DSI_FLUSH);
2418 static inline void unlink_domain_info(struct device_domain_info *info)
2420 assert_spin_locked(&device_domain_lock);
2421 list_del(&info->link);
2422 list_del(&info->global);
2423 if (info->dev)
2424 info->dev->archdata.iommu = NULL;
2427 static void domain_remove_dev_info(struct dmar_domain *domain)
2429 struct device_domain_info *info, *tmp;
2430 unsigned long flags;
2432 spin_lock_irqsave(&device_domain_lock, flags);
2433 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2434 __dmar_remove_one_dev_info(info);
2435 spin_unlock_irqrestore(&device_domain_lock, flags);
2439 * find_domain
2440 * Note: we use struct device->archdata.iommu to store the info
2442 static struct dmar_domain *find_domain(struct device *dev)
2444 struct device_domain_info *info;
2446 /* No lock here, assumes no domain exit in normal case */
2447 info = dev->archdata.iommu;
2448 if (likely(info))
2449 return info->domain;
2450 return NULL;
2453 static inline struct device_domain_info *
2454 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2456 struct device_domain_info *info;
2458 list_for_each_entry(info, &device_domain_list, global)
2459 if (info->iommu->segment == segment && info->bus == bus &&
2460 info->devfn == devfn)
2461 return info;
2463 return NULL;
2466 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2467 int bus, int devfn,
2468 struct device *dev,
2469 struct dmar_domain *domain)
2471 struct dmar_domain *found = NULL;
2472 struct device_domain_info *info;
2473 unsigned long flags;
2474 int ret;
2476 info = alloc_devinfo_mem();
2477 if (!info)
2478 return NULL;
2480 info->bus = bus;
2481 info->devfn = devfn;
2482 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2483 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2484 info->ats_qdep = 0;
2485 info->dev = dev;
2486 info->domain = domain;
2487 info->iommu = iommu;
2488 info->pasid_table = NULL;
2489 info->auxd_enabled = 0;
2490 INIT_LIST_HEAD(&info->auxiliary_domains);
2492 if (dev && dev_is_pci(dev)) {
2493 struct pci_dev *pdev = to_pci_dev(info->dev);
2495 if (!pdev->untrusted &&
2496 !pci_ats_disabled() &&
2497 ecap_dev_iotlb_support(iommu->ecap) &&
2498 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2499 dmar_find_matched_atsr_unit(pdev))
2500 info->ats_supported = 1;
2502 if (sm_supported(iommu)) {
2503 if (pasid_supported(iommu)) {
2504 int features = pci_pasid_features(pdev);
2505 if (features >= 0)
2506 info->pasid_supported = features | 1;
2509 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2510 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2511 info->pri_supported = 1;
2515 spin_lock_irqsave(&device_domain_lock, flags);
2516 if (dev)
2517 found = find_domain(dev);
2519 if (!found) {
2520 struct device_domain_info *info2;
2521 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2522 if (info2) {
2523 found = info2->domain;
2524 info2->dev = dev;
2528 if (found) {
2529 spin_unlock_irqrestore(&device_domain_lock, flags);
2530 free_devinfo_mem(info);
2531 /* Caller must free the original domain */
2532 return found;
2535 spin_lock(&iommu->lock);
2536 ret = domain_attach_iommu(domain, iommu);
2537 spin_unlock(&iommu->lock);
2539 if (ret) {
2540 spin_unlock_irqrestore(&device_domain_lock, flags);
2541 free_devinfo_mem(info);
2542 return NULL;
2545 list_add(&info->link, &domain->devices);
2546 list_add(&info->global, &device_domain_list);
2547 if (dev)
2548 dev->archdata.iommu = info;
2549 spin_unlock_irqrestore(&device_domain_lock, flags);
2551 /* PASID table is mandatory for a PCI device in scalable mode. */
2552 if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2553 ret = intel_pasid_alloc_table(dev);
2554 if (ret) {
2555 dev_err(dev, "PASID table allocation failed\n");
2556 dmar_remove_one_dev_info(dev);
2557 return NULL;
2560 /* Setup the PASID entry for requests without PASID: */
2561 spin_lock(&iommu->lock);
2562 if (hw_pass_through && domain_type_is_si(domain))
2563 ret = intel_pasid_setup_pass_through(iommu, domain,
2564 dev, PASID_RID2PASID);
2565 else
2566 ret = intel_pasid_setup_second_level(iommu, domain,
2567 dev, PASID_RID2PASID);
2568 spin_unlock(&iommu->lock);
2569 if (ret) {
2570 dev_err(dev, "Setup RID2PASID failed\n");
2571 dmar_remove_one_dev_info(dev);
2572 return NULL;
2576 if (dev && domain_context_mapping(domain, dev)) {
2577 dev_err(dev, "Domain context map failed\n");
2578 dmar_remove_one_dev_info(dev);
2579 return NULL;
2582 return domain;
2585 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2587 *(u16 *)opaque = alias;
2588 return 0;
2591 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2593 struct device_domain_info *info;
2594 struct dmar_domain *domain = NULL;
2595 struct intel_iommu *iommu;
2596 u16 dma_alias;
2597 unsigned long flags;
2598 u8 bus, devfn;
2600 iommu = device_to_iommu(dev, &bus, &devfn);
2601 if (!iommu)
2602 return NULL;
2604 if (dev_is_pci(dev)) {
2605 struct pci_dev *pdev = to_pci_dev(dev);
2607 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2609 spin_lock_irqsave(&device_domain_lock, flags);
2610 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2611 PCI_BUS_NUM(dma_alias),
2612 dma_alias & 0xff);
2613 if (info) {
2614 iommu = info->iommu;
2615 domain = info->domain;
2617 spin_unlock_irqrestore(&device_domain_lock, flags);
2619 /* DMA alias already has a domain, use it */
2620 if (info)
2621 goto out;
2624 /* Allocate and initialize new domain for the device */
2625 domain = alloc_domain(0);
2626 if (!domain)
2627 return NULL;
2628 if (domain_init(domain, iommu, gaw)) {
2629 domain_exit(domain);
2630 return NULL;
2633 out:
2635 return domain;
2638 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2639 struct dmar_domain *domain)
2641 struct intel_iommu *iommu;
2642 struct dmar_domain *tmp;
2643 u16 req_id, dma_alias;
2644 u8 bus, devfn;
2646 iommu = device_to_iommu(dev, &bus, &devfn);
2647 if (!iommu)
2648 return NULL;
2650 req_id = ((u16)bus << 8) | devfn;
2652 if (dev_is_pci(dev)) {
2653 struct pci_dev *pdev = to_pci_dev(dev);
2655 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2657 /* register PCI DMA alias device */
2658 if (req_id != dma_alias) {
2659 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2660 dma_alias & 0xff, NULL, domain);
2662 if (!tmp || tmp != domain)
2663 return tmp;
2667 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2668 if (!tmp || tmp != domain)
2669 return tmp;
2671 return domain;
2674 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2676 struct dmar_domain *domain, *tmp;
2678 domain = find_domain(dev);
2679 if (domain)
2680 goto out;
2682 domain = find_or_alloc_domain(dev, gaw);
2683 if (!domain)
2684 goto out;
2686 tmp = set_domain_for_dev(dev, domain);
2687 if (!tmp || domain != tmp) {
2688 domain_exit(domain);
2689 domain = tmp;
2692 out:
2694 return domain;
2697 static int iommu_domain_identity_map(struct dmar_domain *domain,
2698 unsigned long long start,
2699 unsigned long long end)
2701 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2702 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2704 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2705 dma_to_mm_pfn(last_vpfn))) {
2706 pr_err("Reserving iova failed\n");
2707 return -ENOMEM;
2710 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2712 * RMRR range might have overlap with physical memory range,
2713 * clear it first
2715 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2717 return __domain_mapping(domain, first_vpfn, NULL,
2718 first_vpfn, last_vpfn - first_vpfn + 1,
2719 DMA_PTE_READ|DMA_PTE_WRITE);
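/*
 * Example: for the 0-16MiB ISA identity map set up by iommu_prepare_isa()
 * below, start = 0 and end = 0xffffff, so first_vpfn = 0, last_vpfn = 0xfff
 * and 4096 pages are mapped with the IOVA PFN equal to the physical PFN.
 */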
2722 static int domain_prepare_identity_map(struct device *dev,
2723 struct dmar_domain *domain,
2724 unsigned long long start,
2725 unsigned long long end)
2727 /* For _hardware_ passthrough, don't bother. But for software
2728 passthrough, we do it anyway -- it may indicate a memory
2729 range which is reserved in E820, and so didn't get set
2730 up to start with in si_domain */
2731 if (domain == si_domain && hw_pass_through) {
2732 dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
2733 start, end);
2734 return 0;
2737 dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
2739 if (end < start) {
2740 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2741 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2742 dmi_get_system_info(DMI_BIOS_VENDOR),
2743 dmi_get_system_info(DMI_BIOS_VERSION),
2744 dmi_get_system_info(DMI_PRODUCT_VERSION));
2745 return -EIO;
2748 if (end >> agaw_to_width(domain->agaw)) {
2749 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2750 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2751 agaw_to_width(domain->agaw),
2752 dmi_get_system_info(DMI_BIOS_VENDOR),
2753 dmi_get_system_info(DMI_BIOS_VERSION),
2754 dmi_get_system_info(DMI_PRODUCT_VERSION));
2755 return -EIO;
2758 return iommu_domain_identity_map(domain, start, end);
2761 static int iommu_prepare_identity_map(struct device *dev,
2762 unsigned long long start,
2763 unsigned long long end)
2765 struct dmar_domain *domain;
2766 int ret;
2768 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2769 if (!domain)
2770 return -ENOMEM;
2772 ret = domain_prepare_identity_map(dev, domain, start, end);
2773 if (ret)
2774 domain_exit(domain);
2776 return ret;
2779 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2780 struct device *dev)
2782 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2783 return 0;
2784 return iommu_prepare_identity_map(dev, rmrr->base_address,
2785 rmrr->end_address);
2788 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2789 static inline void iommu_prepare_isa(void)
2791 struct pci_dev *pdev;
2792 int ret;
2794 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2795 if (!pdev)
2796 return;
2798 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2799 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2801 if (ret)
2802 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2804 pci_dev_put(pdev);
2806 #else
2807 static inline void iommu_prepare_isa(void)
2809 return;
2811 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
2813 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2815 static int __init si_domain_init(int hw)
2817 int nid, ret;
2819 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2820 if (!si_domain)
2821 return -EFAULT;
2823 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2824 domain_exit(si_domain);
2825 return -EFAULT;
2828 pr_debug("Identity mapping domain allocated\n");
2830 if (hw)
2831 return 0;
2833 for_each_online_node(nid) {
2834 unsigned long start_pfn, end_pfn;
2835 int i;
2837 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2838 ret = iommu_domain_identity_map(si_domain,
2839 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2840 if (ret)
2841 return ret;
2845 return 0;
2848 static int identity_mapping(struct device *dev)
2850 struct device_domain_info *info;
2852 if (likely(!iommu_identity_mapping))
2853 return 0;
2855 info = dev->archdata.iommu;
2856 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2857 return (info->domain == si_domain);
2859 return 0;
2862 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2864 struct dmar_domain *ndomain;
2865 struct intel_iommu *iommu;
2866 u8 bus, devfn;
2868 iommu = device_to_iommu(dev, &bus, &devfn);
2869 if (!iommu)
2870 return -ENODEV;
2872 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2873 if (ndomain != domain)
2874 return -EBUSY;
2876 return 0;
2879 static bool device_has_rmrr(struct device *dev)
2881 struct dmar_rmrr_unit *rmrr;
2882 struct device *tmp;
2883 int i;
2885 rcu_read_lock();
2886 for_each_rmrr_units(rmrr) {
2888 * Return TRUE if this RMRR contains the device that
2889 * is passed in.
2891 for_each_active_dev_scope(rmrr->devices,
2892 rmrr->devices_cnt, i, tmp)
2893 if (tmp == dev) {
2894 rcu_read_unlock();
2895 return true;
2898 rcu_read_unlock();
2899 return false;
2903 * There are a couple of cases where we need to restrict the functionality of
2904 * devices associated with RMRRs. The first is when evaluating a device for
2905 * identity mapping because problems exist when devices are moved in and out
2906 * of domains and their respective RMRR information is lost. This means that
2907 * a device with associated RMRRs will never be in a "passthrough" domain.
2908 * The second is use of the device through the IOMMU API. This interface
2909 * expects to have full control of the IOVA space for the device. We cannot
2910 * satisfy both the requirement that RMRR access is maintained and have an
2911 * unencumbered IOVA space. We also have no ability to quiesce the device's
2912 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2913 * We therefore prevent devices associated with an RMRR from participating in
2914 * the IOMMU API, which eliminates them from device assignment.
2916 * In both cases we assume that PCI USB devices with RMRRs have them largely
2917 * for historical reasons and that the RMRR space is not actively used post
2918 * boot. This exclusion may change if vendors begin to abuse it.
2920 * The same exception is made for graphics devices, with the requirement that
2921 * any use of the RMRR regions will be torn down before assigning the device
2922 * to a guest.
2924 static bool device_is_rmrr_locked(struct device *dev)
2926 if (!device_has_rmrr(dev))
2927 return false;
2929 if (dev_is_pci(dev)) {
2930 struct pci_dev *pdev = to_pci_dev(dev);
2932 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2933 return false;
2936 return true;
2939 static int iommu_should_identity_map(struct device *dev, int startup)
2941 if (dev_is_pci(dev)) {
2942 struct pci_dev *pdev = to_pci_dev(dev);
2944 if (device_is_rmrr_locked(dev))
2945 return 0;
2948 * Prevent any device marked as untrusted from getting
2949 * placed into the static identity mapping domain.
2951 if (pdev->untrusted)
2952 return 0;
2954 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2955 return 1;
2957 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2958 return 1;
2960 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2961 return 0;
2964 * We want to start off with all devices in the 1:1 domain, and
2965 * take them out later if we find they can't access all of memory.
2967 * However, we can't do this for PCI devices behind bridges,
2968 * because all PCI devices behind the same bridge will end up
2969 * with the same source-id on their transactions.
2971 * Practically speaking, we can't change things around for these
2972 * devices at run-time, because we can't be sure there'll be no
2973 * DMA transactions in flight for any of their siblings.
2975 * So PCI devices (unless they're on the root bus) as well as
2976 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2977 * the 1:1 domain, just in _case_ one of their siblings turns out
2978 * not to be able to map all of memory.
2980 if (!pci_is_pcie(pdev)) {
2981 if (!pci_is_root_bus(pdev->bus))
2982 return 0;
2983 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2984 return 0;
2985 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2986 return 0;
2987 } else {
2988 if (device_has_rmrr(dev))
2989 return 0;
2993 * At boot time, we don't yet know if devices will be 64-bit capable.
2994 * Assume that they will -- if they turn out not to be, then we can
2995 * take them out of the 1:1 domain later.
2997 if (!startup) {
2999 * If the device's dma_mask is less than the system's memory
3000 * size then this is not a candidate for identity mapping.
3002 u64 dma_mask = *dev->dma_mask;
3004 if (dev->coherent_dma_mask &&
3005 dev->coherent_dma_mask < dma_mask)
3006 dma_mask = dev->coherent_dma_mask;
3008 return dma_mask >= dma_get_required_mask(dev);
3011 return 1;
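/*
 * Example of the dma_mask check above: a device whose dma_mask covers only
 * 32 bits, on a machine with more than 4GiB of RAM, has
 * dma_mask < dma_get_required_mask(dev) and is therefore not a candidate for
 * the identity (1:1) domain, since it could not reach all of memory through
 * a 1:1 mapping.
 */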
3014 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
3016 int ret;
3018 if (!iommu_should_identity_map(dev, 1))
3019 return 0;
3021 ret = domain_add_dev_info(si_domain, dev);
3022 if (!ret)
3023 dev_info(dev, "%s identity mapping\n",
3024 hw ? "Hardware" : "Software");
3025 else if (ret == -ENODEV)
3026 /* device not associated with an iommu */
3027 ret = 0;
3029 return ret;
3033 static int __init iommu_prepare_static_identity_mapping(int hw)
3035 struct pci_dev *pdev = NULL;
3036 struct dmar_drhd_unit *drhd;
3037 /* To avoid a -Wunused-but-set-variable warning. */
3038 struct intel_iommu *iommu __maybe_unused;
3039 struct device *dev;
3040 int i;
3041 int ret = 0;
3043 for_each_pci_dev(pdev) {
3044 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
3045 if (ret)
3046 return ret;
3049 for_each_active_iommu(iommu, drhd)
3050 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
3051 struct acpi_device_physical_node *pn;
3052 struct acpi_device *adev;
3054 if (dev->bus != &acpi_bus_type)
3055 continue;
3057 adev = to_acpi_device(dev);
3058 mutex_lock(&adev->physical_node_lock);
3059 list_for_each_entry(pn, &adev->physical_node_list, node) {
3060 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
3061 if (ret)
3062 break;
3064 mutex_unlock(&adev->physical_node_lock);
3065 if (ret)
3066 return ret;
3069 return 0;
3072 static void intel_iommu_init_qi(struct intel_iommu *iommu)
3075 * Start from the sane iommu hardware state.
3076 * If the queued invalidation is already initialized by us
3077 * (for example, while enabling interrupt-remapping) then
3078 * we already have things rolling from a sane state.
3080 if (!iommu->qi) {
3082 * Clear any previous faults.
3084 dmar_fault(-1, iommu);
3086 * Disable queued invalidation if supported and already enabled
3087 * before OS handover.
3089 dmar_disable_qi(iommu);
3092 if (dmar_enable_qi(iommu)) {
3094 * Queued Invalidate not enabled, use Register Based Invalidate
3096 iommu->flush.flush_context = __iommu_flush_context;
3097 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3098 pr_info("%s: Using Register based invalidation\n",
3099 iommu->name);
3100 } else {
3101 iommu->flush.flush_context = qi_flush_context;
3102 iommu->flush.flush_iotlb = qi_flush_iotlb;
3103 pr_info("%s: Using Queued invalidation\n", iommu->name);
3107 static int copy_context_table(struct intel_iommu *iommu,
3108 struct root_entry *old_re,
3109 struct context_entry **tbl,
3110 int bus, bool ext)
3112 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
3113 struct context_entry *new_ce = NULL, ce;
3114 struct context_entry *old_ce = NULL;
3115 struct root_entry re;
3116 phys_addr_t old_ce_phys;
3118 tbl_idx = ext ? bus * 2 : bus;
3119 memcpy(&re, old_re, sizeof(re));
3121 for (devfn = 0; devfn < 256; devfn++) {
3122 /* First calculate the correct index */
3123 idx = (ext ? devfn * 2 : devfn) % 256;
3125 if (idx == 0) {
3126 /* First save what we may have and clean up */
3127 if (new_ce) {
3128 tbl[tbl_idx] = new_ce;
3129 __iommu_flush_cache(iommu, new_ce,
3130 VTD_PAGE_SIZE);
3131 pos = 1;
3134 if (old_ce)
3135 memunmap(old_ce);
3137 ret = 0;
3138 if (devfn < 0x80)
3139 old_ce_phys = root_entry_lctp(&re);
3140 else
3141 old_ce_phys = root_entry_uctp(&re);
3143 if (!old_ce_phys) {
3144 if (ext && devfn == 0) {
3145 /* No LCTP, try UCTP */
3146 devfn = 0x7f;
3147 continue;
3148 } else {
3149 goto out;
3153 ret = -ENOMEM;
3154 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3155 MEMREMAP_WB);
3156 if (!old_ce)
3157 goto out;
3159 new_ce = alloc_pgtable_page(iommu->node);
3160 if (!new_ce)
3161 goto out_unmap;
3163 ret = 0;
3166 /* Now copy the context entry */
3167 memcpy(&ce, old_ce + idx, sizeof(ce));
3169 if (!__context_present(&ce))
3170 continue;
3172 did = context_domain_id(&ce);
3173 if (did >= 0 && did < cap_ndoms(iommu->cap))
3174 set_bit(did, iommu->domain_ids);
3177 * We need a marker for copied context entries. This
3178 * marker needs to work for the old format as well as
3179 * for extended context entries.
3181 * Bit 67 of the context entry is used. In the old
3182 * format this bit is available to software, in the
3183 * extended format it is the PGE bit, but PGE is ignored
3184 * by HW if PASIDs are disabled (and thus still
3185 * available).
3187 * So disable PASIDs first and then mark the entry
3188 * copied. This means that we don't copy PASID
3189 * translations from the old kernel, but this is fine as
3190 * faults there are not fatal.
3192 context_clear_pasid_enable(&ce);
3193 context_set_copied(&ce);
3195 new_ce[idx] = ce;
3198 tbl[tbl_idx + pos] = new_ce;
3200 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3202 out_unmap:
3203 memunmap(old_ce);
3205 out:
3206 return ret;
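/*
 * Indexing example: with ext == true each bus gets two context tables.
 * Bus 3 places devfn 0x00-0x7f into tbl[6] (from the lower context-table
 * pointer) and devfn 0x80-0xff into tbl[7] (from the upper context-table
 * pointer); the devfn * 2 in the idx calculation accounts for extended
 * context entries being twice the size of legacy ones.
 */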
3209 static int copy_translation_tables(struct intel_iommu *iommu)
3211 struct context_entry **ctxt_tbls;
3212 struct root_entry *old_rt;
3213 phys_addr_t old_rt_phys;
3214 int ctxt_table_entries;
3215 unsigned long flags;
3216 u64 rtaddr_reg;
3217 int bus, ret;
3218 bool new_ext, ext;
3220 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3221 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3222 new_ext = !!ecap_ecs(iommu->ecap);
3225 * The RTT bit can only be changed when translation is disabled,
3226 * but disabling translation means opening a window for data
3227 * corruption. So bail out and don't copy anything if we would
3228 * have to change the bit.
3230 if (new_ext != ext)
3231 return -EINVAL;
3233 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3234 if (!old_rt_phys)
3235 return -EINVAL;
3237 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3238 if (!old_rt)
3239 return -ENOMEM;
3241 /* This is too big for the stack - allocate it from slab */
3242 ctxt_table_entries = ext ? 512 : 256;
3243 ret = -ENOMEM;
3244 ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
3245 if (!ctxt_tbls)
3246 goto out_unmap;
3248 for (bus = 0; bus < 256; bus++) {
3249 ret = copy_context_table(iommu, &old_rt[bus],
3250 ctxt_tbls, bus, ext);
3251 if (ret) {
3252 pr_err("%s: Failed to copy context table for bus %d\n",
3253 iommu->name, bus);
3254 continue;
3258 spin_lock_irqsave(&iommu->lock, flags);
3260 /* Context tables are copied, now write them to the root_entry table */
3261 for (bus = 0; bus < 256; bus++) {
3262 int idx = ext ? bus * 2 : bus;
3263 u64 val;
3265 if (ctxt_tbls[idx]) {
3266 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3267 iommu->root_entry[bus].lo = val;
3270 if (!ext || !ctxt_tbls[idx + 1])
3271 continue;
3273 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3274 iommu->root_entry[bus].hi = val;
3277 spin_unlock_irqrestore(&iommu->lock, flags);
3279 kfree(ctxt_tbls);
3281 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3283 ret = 0;
3285 out_unmap:
3286 memunmap(old_rt);
3288 return ret;
3291 static int __init init_dmars(void)
3293 struct dmar_drhd_unit *drhd;
3294 struct dmar_rmrr_unit *rmrr;
3295 bool copied_tables = false;
3296 struct device *dev;
3297 struct intel_iommu *iommu;
3298 int i, ret;
3301 * for each drhd
3302 * allocate root
3303 * initialize and program root entry to not present
3304 * endfor
3306 for_each_drhd_unit(drhd) {
3308 * lock not needed as this is only incremented in the
3309 * single-threaded kernel __init code path; all other accesses
3310 * are read only
3312 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3313 g_num_of_iommus++;
3314 continue;
3316 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3319 /* Preallocate enough resources for IOMMU hot-addition */
3320 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3321 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3323 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3324 GFP_KERNEL);
3325 if (!g_iommus) {
3326 pr_err("Allocating global iommu array failed\n");
3327 ret = -ENOMEM;
3328 goto error;
3331 for_each_active_iommu(iommu, drhd) {
3333 * Find the max pasid size of all IOMMUs in the system.
3334 * We need to ensure the system pasid table is no bigger
3335 * than the smallest supported.
3337 if (pasid_supported(iommu)) {
3338 u32 temp = 2 << ecap_pss(iommu->ecap);
3340 intel_pasid_max_id = min_t(u32, temp,
3341 intel_pasid_max_id);
3344 g_iommus[iommu->seq_id] = iommu;
3346 intel_iommu_init_qi(iommu);
3348 ret = iommu_init_domains(iommu);
3349 if (ret)
3350 goto free_iommu;
3352 init_translation_status(iommu);
3354 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3355 iommu_disable_translation(iommu);
3356 clear_translation_pre_enabled(iommu);
3357 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3358 iommu->name);
3362 * TBD:
3363 * we could share the same root & context tables
3364 * among all IOMMUs. Need to split it later.
3366 ret = iommu_alloc_root_entry(iommu);
3367 if (ret)
3368 goto free_iommu;
3370 if (translation_pre_enabled(iommu)) {
3371 pr_info("Translation already enabled - trying to copy translation structures\n");
3373 ret = copy_translation_tables(iommu);
3374 if (ret) {
3376 * We found the IOMMU with translation
3377 * enabled - but failed to copy over the
3378 * old root-entry table. Try to proceed
3379 * by disabling translation now and
3380 * allocating a clean root-entry table.
3381 * This might cause DMAR faults, but
3382 * probably the dump will still succeed.
3384 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3385 iommu->name);
3386 iommu_disable_translation(iommu);
3387 clear_translation_pre_enabled(iommu);
3388 } else {
3389 pr_info("Copied translation tables from previous kernel for %s\n",
3390 iommu->name);
3391 copied_tables = true;
3395 if (!ecap_pass_through(iommu->ecap))
3396 hw_pass_through = 0;
3397 #ifdef CONFIG_INTEL_IOMMU_SVM
3398 if (pasid_supported(iommu))
3399 intel_svm_init(iommu);
3400 #endif
3404 * Now that qi is enabled on all iommus, set the root entry and flush
3405 * caches. This is required on some Intel X58 chipsets, otherwise the
3406 * flush_context function will loop forever and the boot hangs.
3408 for_each_active_iommu(iommu, drhd) {
3409 iommu_flush_write_buffer(iommu);
3410 iommu_set_root_entry(iommu);
3411 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3412 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3415 if (iommu_pass_through)
3416 iommu_identity_mapping |= IDENTMAP_ALL;
3418 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3419 dmar_map_gfx = 0;
3420 #endif
3422 if (!dmar_map_gfx)
3423 iommu_identity_mapping |= IDENTMAP_GFX;
3425 check_tylersburg_isoch();
3427 if (iommu_identity_mapping) {
3428 ret = si_domain_init(hw_pass_through);
3429 if (ret)
3430 goto free_iommu;
3435 * If we copied translations from a previous kernel in the kdump
3436 * case, we can not assign the devices to domains now, as that
3437 * would eliminate the old mappings. So skip this part and defer
3438 * the assignment to device driver initialization time.
3440 if (copied_tables)
3441 goto domains_done;
3444 * If pass through is not set or not enabled, set up context entries for
3445 * identity mappings for rmrr, gfx, and isa, and fall back to static
3446 * identity mapping if iommu_identity_mapping is set.
3448 if (iommu_identity_mapping) {
3449 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3450 if (ret) {
3451 pr_crit("Failed to setup IOMMU pass-through\n");
3452 goto free_iommu;
3456 * For each rmrr
3457 * for each dev attached to rmrr
3458 * do
3459 * locate drhd for dev, alloc domain for dev
3460 * allocate free domain
3461 * allocate page table entries for rmrr
3462 * if context not allocated for bus
3463 * allocate and init context
3464 * set present in root table for this bus
3465 * init context with domain, translation etc
3466 * endfor
3467 * endfor
3469 pr_info("Setting RMRR:\n");
3470 for_each_rmrr_units(rmrr) {
3471 /* some BIOSes list non-existent devices in the DMAR table. */
3472 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3473 i, dev) {
3474 ret = iommu_prepare_rmrr_dev(rmrr, dev);
3475 if (ret)
3476 pr_err("Mapping reserved region failed\n");
3480 iommu_prepare_isa();
3482 domains_done:
3485 * for each drhd
3486 * enable fault log
3487 * global invalidate context cache
3488 * global invalidate iotlb
3489 * enable translation
3491 for_each_iommu(iommu, drhd) {
3492 if (drhd->ignored) {
3494 * we always have to disable PMRs or DMA may fail on
3495 * this device
3497 if (force_on)
3498 iommu_disable_protect_mem_regions(iommu);
3499 continue;
3502 iommu_flush_write_buffer(iommu);
3504 #ifdef CONFIG_INTEL_IOMMU_SVM
3505 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3507 * Calling dmar_alloc_hwirq() with dmar_global_lock held
3508 * could cause a lock race condition, so drop the lock here.
3510 up_write(&dmar_global_lock);
3511 ret = intel_svm_enable_prq(iommu);
3512 down_write(&dmar_global_lock);
3513 if (ret)
3514 goto free_iommu;
3516 #endif
3517 ret = dmar_set_interrupt(iommu);
3518 if (ret)
3519 goto free_iommu;
3521 if (!translation_pre_enabled(iommu))
3522 iommu_enable_translation(iommu);
3524 iommu_disable_protect_mem_regions(iommu);
3527 return 0;
3529 free_iommu:
3530 for_each_active_iommu(iommu, drhd) {
3531 disable_dmar_iommu(iommu);
3532 free_dmar_iommu(iommu);
3535 kfree(g_iommus);
3537 error:
3538 return ret;
3541 /* This takes a number of _MM_ pages, not VTD pages */
3542 static unsigned long intel_alloc_iova(struct device *dev,
3543 struct dmar_domain *domain,
3544 unsigned long nrpages, uint64_t dma_mask)
3546 unsigned long iova_pfn;
3548 /* Restrict dma_mask to the width that the iommu can handle */
3549 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3550 /* Ensure we reserve the whole size-aligned region */
3551 nrpages = __roundup_pow_of_two(nrpages);
3553 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3555 * First try to allocate an io virtual address in
3556 * DMA_BIT_MASK(32) and if that fails then try allocating
3557 * from higher range
3559 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3560 IOVA_PFN(DMA_BIT_MASK(32)), false);
3561 if (iova_pfn)
3562 return iova_pfn;
3564 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3565 IOVA_PFN(dma_mask), true);
3566 if (unlikely(!iova_pfn)) {
3567 dev_err(dev, "Allocating %ld-page iova failed\n", nrpages);
3568 return 0;
3571 return iova_pfn;
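/*
 * Example: a request for 5 MM pages is rounded up to 8 so that the whole
 * size-aligned region is reserved.  For a device advertising a 64-bit
 * dma_mask (and with dmar_forcedac not set), the IOVA is first tried below
 * 4GiB and only then from the full range allowed by the mask.
 */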
3574 struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3576 struct dmar_domain *domain, *tmp;
3577 struct dmar_rmrr_unit *rmrr;
3578 struct device *i_dev;
3579 int i, ret;
3581 domain = find_domain(dev);
3582 if (domain)
3583 goto out;
3585 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3586 if (!domain)
3587 goto out;
3589 /* We have a new domain - setup possible RMRRs for the device */
3590 rcu_read_lock();
3591 for_each_rmrr_units(rmrr) {
3592 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3593 i, i_dev) {
3594 if (i_dev != dev)
3595 continue;
3597 ret = domain_prepare_identity_map(dev, domain,
3598 rmrr->base_address,
3599 rmrr->end_address);
3600 if (ret)
3601 dev_err(dev, "Mapping reserved region failed\n");
3604 rcu_read_unlock();
3606 tmp = set_domain_for_dev(dev, domain);
3607 if (!tmp || domain != tmp) {
3608 domain_exit(domain);
3609 domain = tmp;
3612 out:
3614 if (!domain)
3615 dev_err(dev, "Allocating domain failed\n");
3618 return domain;
3621 /* Check if the dev needs to go through non-identity map and unmap process. */
3622 static bool iommu_need_mapping(struct device *dev)
3624 int found;
3626 if (iommu_dummy(dev))
3627 return false;
3629 if (!iommu_identity_mapping)
3630 return true;
3632 found = identity_mapping(dev);
3633 if (found) {
3634 if (iommu_should_identity_map(dev, 0))
3635 return false;
3638 * The 32 bit DMA device is removed from si_domain and falls back
3639 * to non-identity mapping.
3641 dmar_remove_one_dev_info(dev);
3642 dev_info(dev, "32bit DMA uses non-identity mapping\n");
3643 } else {
3645 * In case of a 64 bit DMA device detached from a VM, the device
3646 * is put into si_domain for identity mapping.
3648 if (iommu_should_identity_map(dev, 0) &&
3649 !domain_add_dev_info(si_domain, dev)) {
3650 dev_info(dev, "64bit DMA uses identity mapping\n");
3651 return false;
3655 return true;
3658 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3659 size_t size, int dir, u64 dma_mask)
3661 struct dmar_domain *domain;
3662 phys_addr_t start_paddr;
3663 unsigned long iova_pfn;
3664 int prot = 0;
3665 int ret;
3666 struct intel_iommu *iommu;
3667 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3669 BUG_ON(dir == DMA_NONE);
3671 domain = get_valid_domain_for_dev(dev);
3672 if (!domain)
3673 return DMA_MAPPING_ERROR;
3675 iommu = domain_get_iommu(domain);
3676 size = aligned_nrpages(paddr, size);
3678 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3679 if (!iova_pfn)
3680 goto error;
3683 * Check if DMAR supports zero-length reads on write-only
3684 * mappings.
3686 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3687 !cap_zlr(iommu->cap))
3688 prot |= DMA_PTE_READ;
3689 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3690 prot |= DMA_PTE_WRITE;
3692 * paddr - (paddr + size) might be a partial page; we should map the whole
3693 * page. Note: if two parts of one page are separately mapped, we
3694 * might have two guest addresses mapping to the same host paddr, but this
3695 * is not a big problem
3697 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3698 mm_to_dma_pfn(paddr_pfn), size, prot);
3699 if (ret)
3700 goto error;
3702 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3703 start_paddr += paddr & ~PAGE_MASK;
3704 return start_paddr;
3706 error:
3707 if (iova_pfn)
3708 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3709 dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
3710 size, (unsigned long long)paddr, dir);
3711 return DMA_MAPPING_ERROR;
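/*
 * Example of the returned handle (assuming 4KiB pages): if the allocator
 * hands back iova_pfn = 0x80000 and the buffer starts at paddr = 0x12345678,
 * whole pages are mapped but the DMA address returned to the caller is
 * (0x80000 << PAGE_SHIFT) + 0x678 = 0x80000678, preserving the in-page
 * offset of the original buffer.
 */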
3714 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3715 unsigned long offset, size_t size,
3716 enum dma_data_direction dir,
3717 unsigned long attrs)
3719 if (iommu_need_mapping(dev))
3720 return __intel_map_single(dev, page_to_phys(page) + offset,
3721 size, dir, *dev->dma_mask);
3722 return dma_direct_map_page(dev, page, offset, size, dir, attrs);
3725 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
3726 size_t size, enum dma_data_direction dir,
3727 unsigned long attrs)
3729 if (iommu_need_mapping(dev))
3730 return __intel_map_single(dev, phys_addr, size, dir,
3731 *dev->dma_mask);
3732 return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
3735 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3737 struct dmar_domain *domain;
3738 unsigned long start_pfn, last_pfn;
3739 unsigned long nrpages;
3740 unsigned long iova_pfn;
3741 struct intel_iommu *iommu;
3742 struct page *freelist;
3743 struct pci_dev *pdev = NULL;
3745 domain = find_domain(dev);
3746 BUG_ON(!domain);
3748 iommu = domain_get_iommu(domain);
3750 iova_pfn = IOVA_PFN(dev_addr);
3752 nrpages = aligned_nrpages(dev_addr, size);
3753 start_pfn = mm_to_dma_pfn(iova_pfn);
3754 last_pfn = start_pfn + nrpages - 1;
3756 if (dev_is_pci(dev))
3757 pdev = to_pci_dev(dev);
3759 dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
3761 freelist = domain_unmap(domain, start_pfn, last_pfn);
3763 if (intel_iommu_strict || (pdev && pdev->untrusted)) {
3764 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3765 nrpages, !freelist, 0);
3766 /* free iova */
3767 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3768 dma_free_pagelist(freelist);
3769 } else {
3770 queue_iova(&domain->iovad, iova_pfn, nrpages,
3771 (unsigned long)freelist);
3773 * queue up the release of the unmap to save the roughly 1/6th of
3774 * the CPU time used up by the iotlb flush operation...
3779 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3780 size_t size, enum dma_data_direction dir,
3781 unsigned long attrs)
3783 if (iommu_need_mapping(dev))
3784 intel_unmap(dev, dev_addr, size);
3785 else
3786 dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
3789 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
3790 size_t size, enum dma_data_direction dir, unsigned long attrs)
3792 if (iommu_need_mapping(dev))
3793 intel_unmap(dev, dev_addr, size);
3796 static void *intel_alloc_coherent(struct device *dev, size_t size,
3797 dma_addr_t *dma_handle, gfp_t flags,
3798 unsigned long attrs)
3800 struct page *page = NULL;
3801 int order;
3803 if (!iommu_need_mapping(dev))
3804 return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
3806 size = PAGE_ALIGN(size);
3807 order = get_order(size);
3809 if (gfpflags_allow_blocking(flags)) {
3810 unsigned int count = size >> PAGE_SHIFT;
3812 page = dma_alloc_from_contiguous(dev, count, order,
3813 flags & __GFP_NOWARN);
3816 if (!page)
3817 page = alloc_pages(flags, order);
3818 if (!page)
3819 return NULL;
3820 memset(page_address(page), 0, size);
3822 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3823 DMA_BIDIRECTIONAL,
3824 dev->coherent_dma_mask);
3825 if (*dma_handle != DMA_MAPPING_ERROR)
3826 return page_address(page);
3827 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3828 __free_pages(page, order);
3830 return NULL;
3833 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3834 dma_addr_t dma_handle, unsigned long attrs)
3836 int order;
3837 struct page *page = virt_to_page(vaddr);
3839 if (!iommu_need_mapping(dev))
3840 return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
3842 size = PAGE_ALIGN(size);
3843 order = get_order(size);
3845 intel_unmap(dev, dma_handle, size);
3846 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3847 __free_pages(page, order);
3850 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3851 int nelems, enum dma_data_direction dir,
3852 unsigned long attrs)
3854 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3855 unsigned long nrpages = 0;
3856 struct scatterlist *sg;
3857 int i;
3859 if (!iommu_need_mapping(dev))
3860 return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
3862 for_each_sg(sglist, sg, nelems, i) {
3863 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3866 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3869 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3870 enum dma_data_direction dir, unsigned long attrs)
3872 int i;
3873 struct dmar_domain *domain;
3874 size_t size = 0;
3875 int prot = 0;
3876 unsigned long iova_pfn;
3877 int ret;
3878 struct scatterlist *sg;
3879 unsigned long start_vpfn;
3880 struct intel_iommu *iommu;
3882 BUG_ON(dir == DMA_NONE);
3883 if (!iommu_need_mapping(dev))
3884 return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
3886 domain = get_valid_domain_for_dev(dev);
3887 if (!domain)
3888 return 0;
3890 iommu = domain_get_iommu(domain);
3892 for_each_sg(sglist, sg, nelems, i)
3893 size += aligned_nrpages(sg->offset, sg->length);
3895 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3896 *dev->dma_mask);
3897 if (!iova_pfn) {
3898 sglist->dma_length = 0;
3899 return 0;
3903 * Check if DMAR supports zero-length reads on write-only
3904 * mappings.
3906 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3907 !cap_zlr(iommu->cap))
3908 prot |= DMA_PTE_READ;
3909 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3910 prot |= DMA_PTE_WRITE;
3912 start_vpfn = mm_to_dma_pfn(iova_pfn);
3914 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3915 if (unlikely(ret)) {
3916 dma_pte_free_pagetable(domain, start_vpfn,
3917 start_vpfn + size - 1,
3918 agaw_to_level(domain->agaw) + 1);
3919 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3920 return 0;
3923 return nelems;
3926 static const struct dma_map_ops intel_dma_ops = {
3927 .alloc = intel_alloc_coherent,
3928 .free = intel_free_coherent,
3929 .map_sg = intel_map_sg,
3930 .unmap_sg = intel_unmap_sg,
3931 .map_page = intel_map_page,
3932 .unmap_page = intel_unmap_page,
3933 .map_resource = intel_map_resource,
3934 .unmap_resource = intel_unmap_resource,
3935 .dma_supported = dma_direct_supported,
3938 static inline int iommu_domain_cache_init(void)
3940 int ret = 0;
3942 iommu_domain_cache = kmem_cache_create("iommu_domain",
3943 sizeof(struct dmar_domain),
3945 SLAB_HWCACHE_ALIGN,
3947 NULL);
3948 if (!iommu_domain_cache) {
3949 pr_err("Couldn't create iommu_domain cache\n");
3950 ret = -ENOMEM;
3953 return ret;
3956 static inline int iommu_devinfo_cache_init(void)
3958 int ret = 0;
3960 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3961 sizeof(struct device_domain_info),
3963 SLAB_HWCACHE_ALIGN,
3964 NULL);
3965 if (!iommu_devinfo_cache) {
3966 pr_err("Couldn't create devinfo cache\n");
3967 ret = -ENOMEM;
3970 return ret;
3973 static int __init iommu_init_mempool(void)
3975 int ret;
3976 ret = iova_cache_get();
3977 if (ret)
3978 return ret;
3980 ret = iommu_domain_cache_init();
3981 if (ret)
3982 goto domain_error;
3984 ret = iommu_devinfo_cache_init();
3985 if (!ret)
3986 return ret;
3988 kmem_cache_destroy(iommu_domain_cache);
3989 domain_error:
3990 iova_cache_put();
3992 return -ENOMEM;
3995 static void __init iommu_exit_mempool(void)
3997 kmem_cache_destroy(iommu_devinfo_cache);
3998 kmem_cache_destroy(iommu_domain_cache);
3999 iova_cache_put();
4002 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
4004 struct dmar_drhd_unit *drhd;
4005 u32 vtbar;
4006 int rc;
4008 /* We know that this device on this chipset has its own IOMMU.
4009 * If we find it under a different IOMMU, then the BIOS is lying
4010 * to us. Hope that the IOMMU for this device is actually
4011 * disabled, and it needs no translation...
4013 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
4014 if (rc) {
4015 /* "can't" happen */
4016 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
4017 return;
4019 vtbar &= 0xffff0000;
4021 /* we know that this iommu should be at offset 0xa000 from vtbar */
4022 drhd = dmar_find_matched_drhd_unit(pdev);
4023 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
4024 TAINT_FIRMWARE_WORKAROUND,
4025 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
4026 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4028 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
4030 static void __init init_no_remapping_devices(void)
4032 struct dmar_drhd_unit *drhd;
4033 struct device *dev;
4034 int i;
4036 for_each_drhd_unit(drhd) {
4037 if (!drhd->include_all) {
4038 for_each_active_dev_scope(drhd->devices,
4039 drhd->devices_cnt, i, dev)
4040 break;
4041 /* ignore DMAR unit if no devices exist */
4042 if (i == drhd->devices_cnt)
4043 drhd->ignored = 1;
4047 for_each_active_drhd_unit(drhd) {
4048 if (drhd->include_all)
4049 continue;
4051 for_each_active_dev_scope(drhd->devices,
4052 drhd->devices_cnt, i, dev)
4053 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
4054 break;
4055 if (i < drhd->devices_cnt)
4056 continue;
4058 /* This IOMMU has *only* gfx devices. Either bypass it or
4059 set the gfx_mapped flag, as appropriate */
4060 if (!dmar_map_gfx) {
4061 drhd->ignored = 1;
4062 for_each_active_dev_scope(drhd->devices,
4063 drhd->devices_cnt, i, dev)
4064 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4069 #ifdef CONFIG_SUSPEND
4070 static int init_iommu_hw(void)
4072 struct dmar_drhd_unit *drhd;
4073 struct intel_iommu *iommu = NULL;
4075 for_each_active_iommu(iommu, drhd)
4076 if (iommu->qi)
4077 dmar_reenable_qi(iommu);
4079 for_each_iommu(iommu, drhd) {
4080 if (drhd->ignored) {
4082 * we always have to disable PMRs or DMA may fail on
4083 * this device
4085 if (force_on)
4086 iommu_disable_protect_mem_regions(iommu);
4087 continue;
4090 iommu_flush_write_buffer(iommu);
4092 iommu_set_root_entry(iommu);
4094 iommu->flush.flush_context(iommu, 0, 0, 0,
4095 DMA_CCMD_GLOBAL_INVL);
4096 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4097 iommu_enable_translation(iommu);
4098 iommu_disable_protect_mem_regions(iommu);
4101 return 0;
4104 static void iommu_flush_all(void)
4106 struct dmar_drhd_unit *drhd;
4107 struct intel_iommu *iommu;
4109 for_each_active_iommu(iommu, drhd) {
4110 iommu->flush.flush_context(iommu, 0, 0, 0,
4111 DMA_CCMD_GLOBAL_INVL);
4112 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4113 DMA_TLB_GLOBAL_FLUSH);
4117 static int iommu_suspend(void)
4119 struct dmar_drhd_unit *drhd;
4120 struct intel_iommu *iommu = NULL;
4121 unsigned long flag;
4123 for_each_active_iommu(iommu, drhd) {
4124 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
4125 GFP_ATOMIC);
4126 if (!iommu->iommu_state)
4127 goto nomem;
4130 iommu_flush_all();
4132 for_each_active_iommu(iommu, drhd) {
4133 iommu_disable_translation(iommu);
4135 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4137 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4138 readl(iommu->reg + DMAR_FECTL_REG);
4139 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4140 readl(iommu->reg + DMAR_FEDATA_REG);
4141 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4142 readl(iommu->reg + DMAR_FEADDR_REG);
4143 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4144 readl(iommu->reg + DMAR_FEUADDR_REG);
4146 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4148 return 0;
4150 nomem:
4151 for_each_active_iommu(iommu, drhd)
4152 kfree(iommu->iommu_state);
4154 return -ENOMEM;
4157 static void iommu_resume(void)
4159 struct dmar_drhd_unit *drhd;
4160 struct intel_iommu *iommu = NULL;
4161 unsigned long flag;
4163 if (init_iommu_hw()) {
4164 if (force_on)
4165 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4166 else
4167 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4168 return;
4171 for_each_active_iommu(iommu, drhd) {
4173 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4175 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4176 iommu->reg + DMAR_FECTL_REG);
4177 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4178 iommu->reg + DMAR_FEDATA_REG);
4179 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4180 iommu->reg + DMAR_FEADDR_REG);
4181 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4182 iommu->reg + DMAR_FEUADDR_REG);
4184 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4187 for_each_active_iommu(iommu, drhd)
4188 kfree(iommu->iommu_state);
4191 static struct syscore_ops iommu_syscore_ops = {
4192 .resume = iommu_resume,
4193 .suspend = iommu_suspend,
4196 static void __init init_iommu_pm_ops(void)
4198 register_syscore_ops(&iommu_syscore_ops);
4201 #else
4202 static inline void init_iommu_pm_ops(void) {}
4203 #endif /* CONFIG_SUSPEND */
4206 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4208 struct acpi_dmar_reserved_memory *rmrr;
4209 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
4210 struct dmar_rmrr_unit *rmrru;
4211 size_t length;
4213 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4214 if (!rmrru)
4215 goto out;
4217 rmrru->hdr = header;
4218 rmrr = (struct acpi_dmar_reserved_memory *)header;
4219 rmrru->base_address = rmrr->base_address;
4220 rmrru->end_address = rmrr->end_address;
4222 length = rmrr->end_address - rmrr->base_address + 1;
4223 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4224 IOMMU_RESV_DIRECT);
4225 if (!rmrru->resv)
4226 goto free_rmrru;
4228 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4229 ((void *)rmrr) + rmrr->header.length,
4230 &rmrru->devices_cnt);
4231 if (rmrru->devices_cnt && rmrru->devices == NULL)
4232 goto free_all;
4234 list_add(&rmrru->list, &dmar_rmrr_units);
4236 return 0;
4237 free_all:
4238 kfree(rmrru->resv);
4239 free_rmrru:
4240 kfree(rmrru);
4241 out:
4242 return -ENOMEM;
4245 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4247 struct dmar_atsr_unit *atsru;
4248 struct acpi_dmar_atsr *tmp;
4250 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4251 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4252 if (atsr->segment != tmp->segment)
4253 continue;
4254 if (atsr->header.length != tmp->header.length)
4255 continue;
4256 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4257 return atsru;
4260 return NULL;
4263 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4265 struct acpi_dmar_atsr *atsr;
4266 struct dmar_atsr_unit *atsru;
4268 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
4269 return 0;
4271 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4272 atsru = dmar_find_atsr(atsr);
4273 if (atsru)
4274 return 0;
4276 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4277 if (!atsru)
4278 return -ENOMEM;
4281 * If memory is allocated from slab by ACPI _DSM method, we need to
4282 * copy the memory content because the memory buffer will be freed
4283 * on return.
4285 atsru->hdr = (void *)(atsru + 1);
4286 memcpy(atsru->hdr, hdr, hdr->length);
4287 atsru->include_all = atsr->flags & 0x1;
4288 if (!atsru->include_all) {
4289 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4290 (void *)atsr + atsr->header.length,
4291 &atsru->devices_cnt);
4292 if (atsru->devices_cnt && atsru->devices == NULL) {
4293 kfree(atsru);
4294 return -ENOMEM;
4298 list_add_rcu(&atsru->list, &dmar_atsr_units);
4300 return 0;
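/*
 * Illustrative standalone sketch (not part of this driver) of the pattern
 * dmar_parse_one_atsr() uses above: the caller's buffer may be freed on
 * return, so the header is copied into storage that trails our own
 * allocation.  struct my_hdr, struct my_unit and keep_header() are
 * hypothetical names; the driver does the same thing with kzalloc().
 */
#include <stdlib.h>
#include <string.h>

struct my_hdr {
	unsigned short type;
	unsigned short length;	/* total header length in bytes */
};

struct my_unit {
	struct my_hdr *hdr;	/* points at the copy trailing this struct */
};

static struct my_unit *keep_header(const struct my_hdr *tmp)
{
	/* one allocation: bookkeeping struct plus room for the header copy */
	struct my_unit *u = calloc(1, sizeof(*u) + tmp->length);

	if (!u)
		return NULL;
	u->hdr = (struct my_hdr *)(u + 1);
	memcpy(u->hdr, tmp, tmp->length);	/* caller may now free tmp */
	return u;
}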
4303 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4305 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4306 kfree(atsru);
4309 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4311 struct acpi_dmar_atsr *atsr;
4312 struct dmar_atsr_unit *atsru;
4314 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4315 atsru = dmar_find_atsr(atsr);
4316 if (atsru) {
4317 list_del_rcu(&atsru->list);
4318 synchronize_rcu();
4319 intel_iommu_free_atsr(atsru);
4322 return 0;
4325 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4327 int i;
4328 struct device *dev;
4329 struct acpi_dmar_atsr *atsr;
4330 struct dmar_atsr_unit *atsru;
4332 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4333 atsru = dmar_find_atsr(atsr);
4334 if (!atsru)
4335 return 0;
4337 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4338 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4339 i, dev)
4340 return -EBUSY;
4343 return 0;
4346 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4348 int sp, ret;
4349 struct intel_iommu *iommu = dmaru->iommu;
4351 if (g_iommus[iommu->seq_id])
4352 return 0;
4354 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4355 pr_warn("%s: Doesn't support hardware pass through.\n",
4356 iommu->name);
4357 return -ENXIO;
4359 if (!ecap_sc_support(iommu->ecap) &&
4360 domain_update_iommu_snooping(iommu)) {
4361 pr_warn("%s: Doesn't support snooping.\n",
4362 iommu->name);
4363 return -ENXIO;
4365 sp = domain_update_iommu_superpage(iommu) - 1;
4366 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4367 pr_warn("%s: Doesn't support large page.\n",
4368 iommu->name);
4369 return -ENXIO;
4373 * Disable translation if already enabled prior to OS handover.
4375 if (iommu->gcmd & DMA_GCMD_TE)
4376 iommu_disable_translation(iommu);
4378 g_iommus[iommu->seq_id] = iommu;
4379 ret = iommu_init_domains(iommu);
4380 if (ret == 0)
4381 ret = iommu_alloc_root_entry(iommu);
4382 if (ret)
4383 goto out;
4385 #ifdef CONFIG_INTEL_IOMMU_SVM
4386 if (pasid_supported(iommu))
4387 intel_svm_init(iommu);
4388 #endif
4390 if (dmaru->ignored) {
4392 * we always have to disable PMRs or DMA may fail on this device
4394 if (force_on)
4395 iommu_disable_protect_mem_regions(iommu);
4396 return 0;
4399 intel_iommu_init_qi(iommu);
4400 iommu_flush_write_buffer(iommu);
4402 #ifdef CONFIG_INTEL_IOMMU_SVM
4403 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
4404 ret = intel_svm_enable_prq(iommu);
4405 if (ret)
4406 goto disable_iommu;
4408 #endif
4409 ret = dmar_set_interrupt(iommu);
4410 if (ret)
4411 goto disable_iommu;
4413 iommu_set_root_entry(iommu);
4414 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4415 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4416 iommu_enable_translation(iommu);
4418 iommu_disable_protect_mem_regions(iommu);
4419 return 0;
4421 disable_iommu:
4422 disable_dmar_iommu(iommu);
4423 out:
4424 free_dmar_iommu(iommu);
4425 return ret;
4428 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4430 int ret = 0;
4431 struct intel_iommu *iommu = dmaru->iommu;
4433 if (!intel_iommu_enabled)
4434 return 0;
4435 if (iommu == NULL)
4436 return -EINVAL;
4438 if (insert) {
4439 ret = intel_iommu_add(dmaru);
4440 } else {
4441 disable_dmar_iommu(iommu);
4442 free_dmar_iommu(iommu);
4445 return ret;
4448 static void intel_iommu_free_dmars(void)
4450 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4451 struct dmar_atsr_unit *atsru, *atsr_n;
4453 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4454 list_del(&rmrru->list);
4455 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4456 kfree(rmrru->resv);
4457 kfree(rmrru);
4460 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4461 list_del(&atsru->list);
4462 intel_iommu_free_atsr(atsru);
4466 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4468 int i, ret = 1;
4469 struct pci_bus *bus;
4470 struct pci_dev *bridge = NULL;
4471 struct device *tmp;
4472 struct acpi_dmar_atsr *atsr;
4473 struct dmar_atsr_unit *atsru;
4475 dev = pci_physfn(dev);
4476 for (bus = dev->bus; bus; bus = bus->parent) {
4477 bridge = bus->self;
4478 /* If it's an integrated device, allow ATS */
4479 if (!bridge)
4480 return 1;
4481 /* Connected via non-PCIe: no ATS */
4482 if (!pci_is_pcie(bridge) ||
4483 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4484 return 0;
4485 /* If we found the root port, look it up in the ATSR */
4486 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4487 break;
4490 rcu_read_lock();
4491 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4492 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4493 if (atsr->segment != pci_domain_nr(dev->bus))
4494 continue;
4496 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4497 if (tmp == &bridge->dev)
4498 goto out;
4500 if (atsru->include_all)
4501 goto out;
4503 ret = 0;
4504 out:
4505 rcu_read_unlock();
4507 return ret;
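/*
 * Illustrative sketch (hypothetical helper, not part of this driver) of the
 * bus walk performed by dmar_find_matched_atsr_unit() above: climb
 * dev->bus->parent until the root port is found, or give up when a non-PCIe
 * bridge is crossed or the device turns out to be root-complex integrated.
 */
#include <linux/pci.h>

static struct pci_dev *find_upstream_root_port(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;

	for (bus = pci_physfn(pdev)->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge)		/* root-complex integrated device */
			return NULL;
		if (!pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return NULL;	/* reached conventional PCI: stop */
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			return bridge;	/* this is the root port */
	}
	return NULL;
}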
4510 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4512 int ret;
4513 struct dmar_rmrr_unit *rmrru;
4514 struct dmar_atsr_unit *atsru;
4515 struct acpi_dmar_atsr *atsr;
4516 struct acpi_dmar_reserved_memory *rmrr;
4518 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4519 return 0;
4521 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4522 rmrr = container_of(rmrru->hdr,
4523 struct acpi_dmar_reserved_memory, header);
4524 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4525 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4526 ((void *)rmrr) + rmrr->header.length,
4527 rmrr->segment, rmrru->devices,
4528 rmrru->devices_cnt);
4529 if (ret < 0)
4530 return ret;
4531 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4532 dmar_remove_dev_scope(info, rmrr->segment,
4533 rmrru->devices, rmrru->devices_cnt);
4537 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4538 if (atsru->include_all)
4539 continue;
4541 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4542 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4543 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4544 (void *)atsr + atsr->header.length,
4545 atsr->segment, atsru->devices,
4546 atsru->devices_cnt);
4547 if (ret > 0)
4548 break;
4549 else if (ret < 0)
4550 return ret;
4551 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4552 if (dmar_remove_dev_scope(info, atsr->segment,
4553 atsru->devices, atsru->devices_cnt))
4554 break;
4558 return 0;
4562 * Here we only respond to the action of a device being unbound from its driver.
4564 * A newly added device is not attached to its DMAR domain here yet; that
4565 * happens when the device is first mapped to an iova.
4567 static int device_notifier(struct notifier_block *nb,
4568 unsigned long action, void *data)
4570 struct device *dev = data;
4571 struct dmar_domain *domain;
4573 if (iommu_dummy(dev))
4574 return 0;
4576 if (action == BUS_NOTIFY_REMOVED_DEVICE) {
4577 domain = find_domain(dev);
4578 if (!domain)
4579 return 0;
4581 dmar_remove_one_dev_info(dev);
4582 if (!domain_type_is_vm_or_si(domain) &&
4583 list_empty(&domain->devices))
4584 domain_exit(domain);
4585 } else if (action == BUS_NOTIFY_ADD_DEVICE) {
4586 if (iommu_should_identity_map(dev, 1))
4587 domain_add_dev_info(si_domain, dev);
4590 return 0;
4593 static struct notifier_block device_nb = {
4594 .notifier_call = device_notifier,
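/*
 * Illustrative sketch (hypothetical code, not part of this driver): the
 * minimal shape of a bus notifier such as device_nb above.  my_notifier and
 * my_nb are made-up names; the real callback is device_notifier() and is
 * registered on pci_bus_type from intel_iommu_init() further down.
 */
#include <linux/device.h>
#include <linux/notifier.h>

static int my_notifier(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct device *dev = data;	/* the device being added/removed */

	if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dev_info(dev, "device is being removed from its bus\n");

	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_notifier,
};

/* Registration, typically from an init path:
 *	bus_register_notifier(&pci_bus_type, &my_nb);
 */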
4597 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4598 unsigned long val, void *v)
4600 struct memory_notify *mhp = v;
4601 unsigned long long start, end;
4602 unsigned long start_vpfn, last_vpfn;
4604 switch (val) {
4605 case MEM_GOING_ONLINE:
4606 start = mhp->start_pfn << PAGE_SHIFT;
4607 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4608 if (iommu_domain_identity_map(si_domain, start, end)) {
4609 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4610 start, end);
4611 return NOTIFY_BAD;
4613 break;
4615 case MEM_OFFLINE:
4616 case MEM_CANCEL_ONLINE:
4617 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4618 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4619 while (start_vpfn <= last_vpfn) {
4620 struct iova *iova;
4621 struct dmar_drhd_unit *drhd;
4622 struct intel_iommu *iommu;
4623 struct page *freelist;
4625 iova = find_iova(&si_domain->iovad, start_vpfn);
4626 if (iova == NULL) {
4627 pr_debug("Failed to get IOVA for PFN %lx\n",
4628 start_vpfn);
4629 break;
4632 iova = split_and_remove_iova(&si_domain->iovad, iova,
4633 start_vpfn, last_vpfn);
4634 if (iova == NULL) {
4635 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4636 start_vpfn, last_vpfn);
4637 return NOTIFY_BAD;
4640 freelist = domain_unmap(si_domain, iova->pfn_lo,
4641 iova->pfn_hi);
4643 rcu_read_lock();
4644 for_each_active_iommu(iommu, drhd)
4645 iommu_flush_iotlb_psi(iommu, si_domain,
4646 iova->pfn_lo, iova_size(iova),
4647 !freelist, 0);
4648 rcu_read_unlock();
4649 dma_free_pagelist(freelist);
4651 start_vpfn = iova->pfn_hi + 1;
4652 free_iova_mem(iova);
4654 break;
4657 return NOTIFY_OK;
4660 static struct notifier_block intel_iommu_memory_nb = {
4661 .notifier_call = intel_iommu_memory_notifier,
4662 .priority = 0
4665 static void free_all_cpu_cached_iovas(unsigned int cpu)
4667 int i;
4669 for (i = 0; i < g_num_of_iommus; i++) {
4670 struct intel_iommu *iommu = g_iommus[i];
4671 struct dmar_domain *domain;
4672 int did;
4674 if (!iommu)
4675 continue;
4677 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4678 domain = get_iommu_domain(iommu, (u16)did);
4680 if (!domain)
4681 continue;
4682 free_cpu_cached_iovas(cpu, &domain->iovad);
4687 static int intel_iommu_cpu_dead(unsigned int cpu)
4689 free_all_cpu_cached_iovas(cpu);
4690 return 0;
4693 static void intel_disable_iommus(void)
4695 struct intel_iommu *iommu = NULL;
4696 struct dmar_drhd_unit *drhd;
4698 for_each_iommu(iommu, drhd)
4699 iommu_disable_translation(iommu);
4702 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4704 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4706 return container_of(iommu_dev, struct intel_iommu, iommu);
4709 static ssize_t intel_iommu_show_version(struct device *dev,
4710 struct device_attribute *attr,
4711 char *buf)
4713 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4714 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4715 return sprintf(buf, "%d:%d\n",
4716 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4718 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4720 static ssize_t intel_iommu_show_address(struct device *dev,
4721 struct device_attribute *attr,
4722 char *buf)
4724 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4725 return sprintf(buf, "%llx\n", iommu->reg_phys);
4727 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4729 static ssize_t intel_iommu_show_cap(struct device *dev,
4730 struct device_attribute *attr,
4731 char *buf)
4733 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4734 return sprintf(buf, "%llx\n", iommu->cap);
4736 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4738 static ssize_t intel_iommu_show_ecap(struct device *dev,
4739 struct device_attribute *attr,
4740 char *buf)
4742 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4743 return sprintf(buf, "%llx\n", iommu->ecap);
4745 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4747 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4748 struct device_attribute *attr,
4749 char *buf)
4751 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4752 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4754 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4756 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4757 struct device_attribute *attr,
4758 char *buf)
4760 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4761 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4762 cap_ndoms(iommu->cap)));
4764 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4766 static struct attribute *intel_iommu_attrs[] = {
4767 &dev_attr_version.attr,
4768 &dev_attr_address.attr,
4769 &dev_attr_cap.attr,
4770 &dev_attr_ecap.attr,
4771 &dev_attr_domains_supported.attr,
4772 &dev_attr_domains_used.attr,
4773 NULL,
4776 static struct attribute_group intel_iommu_group = {
4777 .name = "intel-iommu",
4778 .attrs = intel_iommu_attrs,
4781 const struct attribute_group *intel_iommu_groups[] = {
4782 &intel_iommu_group,
4783 NULL,
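/*
 * Illustrative userspace sketch: reading one of the attributes exported by
 * intel_iommu_group above.  The path assumes a unit registered as "dmar0"
 * and may differ on a given system; /sys/class/iommu/<name>/intel-iommu/
 * is where the "intel-iommu" attribute group typically appears.
 */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/class/iommu/dmar0/intel-iommu/cap", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("DMAR capability register: %s", buf);
	fclose(f);
	return 0;
}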
4786 static int __init platform_optin_force_iommu(void)
4788 struct pci_dev *pdev = NULL;
4789 bool has_untrusted_dev = false;
4791 if (!dmar_platform_optin() || no_platform_optin)
4792 return 0;
4794 for_each_pci_dev(pdev) {
4795 if (pdev->untrusted) {
4796 has_untrusted_dev = true;
4797 break;
4801 if (!has_untrusted_dev)
4802 return 0;
4804 if (no_iommu || dmar_disabled)
4805 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4808 * If Intel-IOMMU is disabled by default, we will apply identity
4809 * map for all devices except those marked as being untrusted.
4811 if (dmar_disabled)
4812 iommu_identity_mapping |= IDENTMAP_ALL;
4814 dmar_disabled = 0;
4815 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4816 swiotlb = 0;
4817 #endif
4818 no_iommu = 0;
4820 return 1;
4823 int __init intel_iommu_init(void)
4825 int ret = -ENODEV;
4826 struct dmar_drhd_unit *drhd;
4827 struct intel_iommu *iommu;
4830 * Intel IOMMU is required for a TXT/tboot launch or platform
4831 * opt in, so enforce that.
4833 force_on = tboot_force_iommu() || platform_optin_force_iommu();
4835 if (iommu_init_mempool()) {
4836 if (force_on)
4837 panic("tboot: Failed to initialize iommu memory\n");
4838 return -ENOMEM;
4841 down_write(&dmar_global_lock);
4842 if (dmar_table_init()) {
4843 if (force_on)
4844 panic("tboot: Failed to initialize DMAR table\n");
4845 goto out_free_dmar;
4848 if (dmar_dev_scope_init() < 0) {
4849 if (force_on)
4850 panic("tboot: Failed to initialize DMAR device scope\n");
4851 goto out_free_dmar;
4854 up_write(&dmar_global_lock);
4857 * The bus notifier takes the dmar_global_lock, so lockdep will
4858 * complain later when we register it under the lock.
4860 dmar_register_bus_notifier();
4862 down_write(&dmar_global_lock);
4864 if (no_iommu || dmar_disabled) {
4866 * We exit the function here to ensure the IOMMU's remapping and
4867 * mempool aren't set up, which means that the IOMMU's PMRs
4868 * won't be disabled via the call to init_dmars(). So disable
4869 * them explicitly here. The PMRs were set up by tboot prior to
4870 * calling SENTER, but the kernel is expected to reset/tear
4871 * down the PMRs.
4873 if (intel_iommu_tboot_noforce) {
4874 for_each_iommu(iommu, drhd)
4875 iommu_disable_protect_mem_regions(iommu);
4879 * Make sure the IOMMUs are switched off, even when we
4880 * boot into a kexec kernel and the previous kernel left
4881 * them enabled
4883 intel_disable_iommus();
4884 goto out_free_dmar;
4887 if (list_empty(&dmar_rmrr_units))
4888 pr_info("No RMRR found\n");
4890 if (list_empty(&dmar_atsr_units))
4891 pr_info("No ATSR found\n");
4893 if (dmar_init_reserved_ranges()) {
4894 if (force_on)
4895 panic("tboot: Failed to reserve iommu ranges\n");
4896 goto out_free_reserved_range;
4899 if (dmar_map_gfx)
4900 intel_iommu_gfx_mapped = 1;
4902 init_no_remapping_devices();
4904 ret = init_dmars();
4905 if (ret) {
4906 if (force_on)
4907 panic("tboot: Failed to initialize DMARs\n");
4908 pr_err("Initialization failed\n");
4909 goto out_free_reserved_range;
4911 up_write(&dmar_global_lock);
4912 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4914 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4915 swiotlb = 0;
4916 #endif
4917 dma_ops = &intel_dma_ops;
4919 init_iommu_pm_ops();
4921 for_each_active_iommu(iommu, drhd) {
4922 iommu_device_sysfs_add(&iommu->iommu, NULL,
4923 intel_iommu_groups,
4924 "%s", iommu->name);
4925 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4926 iommu_device_register(&iommu->iommu);
4929 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4930 bus_register_notifier(&pci_bus_type, &device_nb);
4931 if (si_domain && !hw_pass_through)
4932 register_memory_notifier(&intel_iommu_memory_nb);
4933 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4934 intel_iommu_cpu_dead);
4935 intel_iommu_enabled = 1;
4936 intel_iommu_debugfs_init();
4938 return 0;
4940 out_free_reserved_range:
4941 put_iova_domain(&reserved_iova_list);
4942 out_free_dmar:
4943 intel_iommu_free_dmars();
4944 up_write(&dmar_global_lock);
4945 iommu_exit_mempool();
4946 return ret;
4949 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4951 struct intel_iommu *iommu = opaque;
4953 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4954 return 0;
4958 * NB - intel-iommu lacks any sort of reference counting for the users of
4959 * dependent devices. If multiple endpoints have intersecting dependent
4960 * devices, unbinding the driver from any one of them will possibly leave
4961 * the others unable to operate.
4963 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4965 if (!iommu || !dev || !dev_is_pci(dev))
4966 return;
4968 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4971 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4973 struct intel_iommu *iommu;
4974 unsigned long flags;
4976 assert_spin_locked(&device_domain_lock);
4978 if (WARN_ON(!info))
4979 return;
4981 iommu = info->iommu;
4983 if (info->dev) {
4984 if (dev_is_pci(info->dev) && sm_supported(iommu))
4985 intel_pasid_tear_down_entry(iommu, info->dev,
4986 PASID_RID2PASID);
4988 iommu_disable_dev_iotlb(info);
4989 domain_context_clear(iommu, info->dev);
4990 intel_pasid_free_table(info->dev);
4993 unlink_domain_info(info);
4995 spin_lock_irqsave(&iommu->lock, flags);
4996 domain_detach_iommu(info->domain, iommu);
4997 spin_unlock_irqrestore(&iommu->lock, flags);
4999 free_devinfo_mem(info);
5002 static void dmar_remove_one_dev_info(struct device *dev)
5004 struct device_domain_info *info;
5005 unsigned long flags;
5007 spin_lock_irqsave(&device_domain_lock, flags);
5008 info = dev->archdata.iommu;
5009 __dmar_remove_one_dev_info(info);
5010 spin_unlock_irqrestore(&device_domain_lock, flags);
5013 static int md_domain_init(struct dmar_domain *domain, int guest_width)
5015 int adjust_width;
5017 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
5018 domain_reserve_special_ranges(domain);
5020 /* calculate AGAW */
5021 domain->gaw = guest_width;
5022 adjust_width = guestwidth_to_adjustwidth(guest_width);
5023 domain->agaw = width_to_agaw(adjust_width);
5025 domain->iommu_coherency = 0;
5026 domain->iommu_snooping = 0;
5027 domain->iommu_superpage = 0;
5028 domain->max_addr = 0;
5030 /* always allocate the top pgd */
5031 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
5032 if (!domain->pgd)
5033 return -ENOMEM;
5034 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
5035 return 0;
5038 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
5040 struct dmar_domain *dmar_domain;
5041 struct iommu_domain *domain;
5043 if (type != IOMMU_DOMAIN_UNMANAGED)
5044 return NULL;
5046 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
5047 if (!dmar_domain) {
5048 pr_err("Can't allocate dmar_domain\n");
5049 return NULL;
5051 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
5052 pr_err("Domain initialization failed\n");
5053 domain_exit(dmar_domain);
5054 return NULL;
5056 domain_update_iommu_cap(dmar_domain);
5058 domain = &dmar_domain->domain;
5059 domain->geometry.aperture_start = 0;
5060 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
5061 domain->geometry.force_aperture = true;
5063 return domain;
5066 static void intel_iommu_domain_free(struct iommu_domain *domain)
5068 domain_exit(to_dmar_domain(domain));
5072 * Check whether a @domain could be attached to the @dev through the
5073 * aux-domain attach/detach APIs.
5075 static inline bool
5076 is_aux_domain(struct device *dev, struct iommu_domain *domain)
5078 struct device_domain_info *info = dev->archdata.iommu;
5080 return info && info->auxd_enabled &&
5081 domain->type == IOMMU_DOMAIN_UNMANAGED;
5084 static void auxiliary_link_device(struct dmar_domain *domain,
5085 struct device *dev)
5087 struct device_domain_info *info = dev->archdata.iommu;
5089 assert_spin_locked(&device_domain_lock);
5090 if (WARN_ON(!info))
5091 return;
5093 domain->auxd_refcnt++;
5094 list_add(&domain->auxd, &info->auxiliary_domains);
5097 static void auxiliary_unlink_device(struct dmar_domain *domain,
5098 struct device *dev)
5100 struct device_domain_info *info = dev->archdata.iommu;
5102 assert_spin_locked(&device_domain_lock);
5103 if (WARN_ON(!info))
5104 return;
5106 list_del(&domain->auxd);
5107 domain->auxd_refcnt--;
5109 if (!domain->auxd_refcnt && domain->default_pasid > 0)
5110 intel_pasid_free_id(domain->default_pasid);
5113 static int aux_domain_add_dev(struct dmar_domain *domain,
5114 struct device *dev)
5116 int ret;
5117 u8 bus, devfn;
5118 unsigned long flags;
5119 struct intel_iommu *iommu;
5121 iommu = device_to_iommu(dev, &bus, &devfn);
5122 if (!iommu)
5123 return -ENODEV;
5125 if (domain->default_pasid <= 0) {
5126 int pasid;
5128 pasid = intel_pasid_alloc_id(domain, PASID_MIN,
5129 pci_max_pasids(to_pci_dev(dev)),
5130 GFP_KERNEL);
5131 if (pasid <= 0) {
5132 pr_err("Can't allocate default pasid\n");
5133 return -ENODEV;
5135 domain->default_pasid = pasid;
5138 spin_lock_irqsave(&device_domain_lock, flags);
5140 * iommu->lock must be held to attach the domain to the iommu and to set up
5141 * the PASID entry for second-level translation.
5143 spin_lock(&iommu->lock);
5144 ret = domain_attach_iommu(domain, iommu);
5145 if (ret)
5146 goto attach_failed;
5148 /* Set up the PASID entry for mediated devices: */
5149 ret = intel_pasid_setup_second_level(iommu, domain, dev,
5150 domain->default_pasid);
5151 if (ret)
5152 goto table_failed;
5153 spin_unlock(&iommu->lock);
5155 auxiliary_link_device(domain, dev);
5157 spin_unlock_irqrestore(&device_domain_lock, flags);
5159 return 0;
5161 table_failed:
5162 domain_detach_iommu(domain, iommu);
5163 attach_failed:
5164 spin_unlock(&iommu->lock);
5165 spin_unlock_irqrestore(&device_domain_lock, flags);
5166 if (!domain->auxd_refcnt && domain->default_pasid > 0)
5167 intel_pasid_free_id(domain->default_pasid);
5169 return ret;
5172 static void aux_domain_remove_dev(struct dmar_domain *domain,
5173 struct device *dev)
5175 struct device_domain_info *info;
5176 struct intel_iommu *iommu;
5177 unsigned long flags;
5179 if (!is_aux_domain(dev, &domain->domain))
5180 return;
5182 spin_lock_irqsave(&device_domain_lock, flags);
5183 info = dev->archdata.iommu;
5184 iommu = info->iommu;
5186 auxiliary_unlink_device(domain, dev);
5188 spin_lock(&iommu->lock);
5189 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
5190 domain_detach_iommu(domain, iommu);
5191 spin_unlock(&iommu->lock);
5193 spin_unlock_irqrestore(&device_domain_lock, flags);
5196 static int prepare_domain_attach_device(struct iommu_domain *domain,
5197 struct device *dev)
5199 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5200 struct intel_iommu *iommu;
5201 int addr_width;
5202 u8 bus, devfn;
5204 iommu = device_to_iommu(dev, &bus, &devfn);
5205 if (!iommu)
5206 return -ENODEV;
5208 /* check if this iommu agaw is sufficient for max mapped address */
5209 addr_width = agaw_to_width(iommu->agaw);
5210 if (addr_width > cap_mgaw(iommu->cap))
5211 addr_width = cap_mgaw(iommu->cap);
5213 if (dmar_domain->max_addr > (1LL << addr_width)) {
5214 dev_err(dev, "%s: iommu width (%d) is not "
5215 "sufficient for the mapped address (%llx)\n",
5216 __func__, addr_width, dmar_domain->max_addr);
5217 return -EFAULT;
5219 dmar_domain->gaw = addr_width;
5222 * Knock out extra levels of page tables if necessary
5224 while (iommu->agaw < dmar_domain->agaw) {
5225 struct dma_pte *pte;
5227 pte = dmar_domain->pgd;
5228 if (dma_pte_present(pte)) {
5229 dmar_domain->pgd = (struct dma_pte *)
5230 phys_to_virt(dma_pte_addr(pte));
5231 free_pgtable_page(pte);
5233 dmar_domain->agaw--;
5236 return 0;
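/*
 * Illustrative standalone sketch of the address-width arithmetic behind the
 * AGAW handling in prepare_domain_attach_device() above: with a 12-bit page
 * offset and 9 translation bits per page-table level, an N-level table
 * covers 12 + 9*N address bits, so extra levels can be stripped when the
 * attached IOMMU supports a narrower width.  levels_for_width() is a
 * made-up helper, not the driver's width_to_agaw()/agaw_to_width().
 */
#include <stdio.h>

static int levels_for_width(int width)
{
	return (width - 12 + 8) / 9;	/* round up to whole levels */
}

int main(void)
{
	int width;

	for (width = 30; width <= 57; width += 9)
		printf("%2d-bit address width -> %d page-table levels\n",
		       width, levels_for_width(width));
	return 0;
}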
5239 static int intel_iommu_attach_device(struct iommu_domain *domain,
5240 struct device *dev)
5242 int ret;
5244 if (device_is_rmrr_locked(dev)) {
5245 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
5246 return -EPERM;
5249 if (is_aux_domain(dev, domain))
5250 return -EPERM;
5252 /* normally dev is not mapped */
5253 if (unlikely(domain_context_mapped(dev))) {
5254 struct dmar_domain *old_domain;
5256 old_domain = find_domain(dev);
5257 if (old_domain) {
5258 rcu_read_lock();
5259 dmar_remove_one_dev_info(dev);
5260 rcu_read_unlock();
5262 if (!domain_type_is_vm_or_si(old_domain) &&
5263 list_empty(&old_domain->devices))
5264 domain_exit(old_domain);
5268 ret = prepare_domain_attach_device(domain, dev);
5269 if (ret)
5270 return ret;
5272 return domain_add_dev_info(to_dmar_domain(domain), dev);
5275 static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
5276 struct device *dev)
5278 int ret;
5280 if (!is_aux_domain(dev, domain))
5281 return -EPERM;
5283 ret = prepare_domain_attach_device(domain, dev);
5284 if (ret)
5285 return ret;
5287 return aux_domain_add_dev(to_dmar_domain(domain), dev);
5290 static void intel_iommu_detach_device(struct iommu_domain *domain,
5291 struct device *dev)
5293 dmar_remove_one_dev_info(dev);
5296 static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
5297 struct device *dev)
5299 aux_domain_remove_dev(to_dmar_domain(domain), dev);
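/*
 * Illustrative sketch (hypothetical caller, e.g. an mdev-style driver): the
 * aux-domain entry points above are reached through the generic IOMMU API
 * that these ops plug into.  use_aux_domain() is a made-up function and the
 * error handling is trimmed to the bare minimum.
 */
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/pci.h>

static int use_aux_domain(struct device *dev)
{
	struct iommu_domain *domain;
	int pasid, ret;

	ret = iommu_dev_enable_feat(dev, IOMMU_DEV_FEAT_AUX);	/* -> intel_iommu_enable_auxd() */
	if (ret)
		return ret;

	domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_aux_attach_device(domain, dev);	/* -> aux_domain_add_dev() */
	if (ret)
		goto out_free;

	pasid = iommu_aux_get_pasid(domain, dev);	/* default PASID of this domain */
	dev_info(dev, "aux domain attached, PASID %d\n", pasid);

	iommu_aux_detach_device(domain, dev);		/* -> aux_domain_remove_dev() */
out_free:
	iommu_domain_free(domain);
	return ret;
}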
5302 static int intel_iommu_map(struct iommu_domain *domain,
5303 unsigned long iova, phys_addr_t hpa,
5304 size_t size, int iommu_prot)
5306 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5307 u64 max_addr;
5308 int prot = 0;
5309 int ret;
5311 if (iommu_prot & IOMMU_READ)
5312 prot |= DMA_PTE_READ;
5313 if (iommu_prot & IOMMU_WRITE)
5314 prot |= DMA_PTE_WRITE;
5315 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5316 prot |= DMA_PTE_SNP;
5318 max_addr = iova + size;
5319 if (dmar_domain->max_addr < max_addr) {
5320 u64 end;
5322 /* check if minimum agaw is sufficient for mapped address */
5323 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5324 if (end < max_addr) {
5325 pr_err("%s: iommu width (%d) is not "
5326 "sufficient for the mapped address (%llx)\n",
5327 __func__, dmar_domain->gaw, max_addr);
5328 return -EFAULT;
5330 dmar_domain->max_addr = max_addr;
5332 /* Round size up to the next multiple of PAGE_SIZE if it plus the low
5333 bits of hpa would take us onto the next page; see the sketch after this function */
5334 size = aligned_nrpages(hpa, size);
5335 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5336 hpa >> VTD_PAGE_SHIFT, size, prot);
5337 return ret;
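/*
 * Illustrative standalone sketch of the rounding mentioned in the comment in
 * intel_iommu_map() above: the number of 4KiB pages needed so that both the
 * sub-page offset of hpa and the full size are covered.  npages_needed() is
 * a made-up helper, not the driver's aligned_nrpages().
 */
#include <stdio.h>

#define PG_SHIFT	12
#define PG_SIZE		(1UL << PG_SHIFT)

static unsigned long npages_needed(unsigned long long hpa, unsigned long size)
{
	unsigned long long span = (hpa & (PG_SIZE - 1)) + size;

	return (span + PG_SIZE - 1) >> PG_SHIFT;	/* round up */
}

int main(void)
{
	/* 0x1000 bytes starting 0x800 into a page span two pages, not one */
	printf("%lu page(s)\n", npages_needed(0x1800, 0x1000));
	return 0;
}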
5340 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5341 unsigned long iova, size_t size)
5343 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5344 struct page *freelist = NULL;
5345 unsigned long start_pfn, last_pfn;
5346 unsigned int npages;
5347 int iommu_id, level = 0;
5349 /* Cope with the horrid API, which requires us to unmap more than the size
5350 argument if it happens to be a large-page mapping; see the sketch after this function. */
5351 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5353 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5354 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5356 start_pfn = iova >> VTD_PAGE_SHIFT;
5357 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5359 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5361 npages = last_pfn - start_pfn + 1;
5363 for_each_domain_iommu(iommu_id, dmar_domain)
5364 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5365 start_pfn, npages, !freelist, 0);
5367 dma_free_pagelist(freelist);
5369 if (dmar_domain->max_addr == iova + size)
5370 dmar_domain->max_addr = iova;
5372 return size;
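/*
 * Illustrative standalone sketch of the size round-up in intel_iommu_unmap()
 * above: when the IOVA is covered by a PTE at level N, the smallest unit
 * that can be unmapped is 4KiB << (9 * (N - 1)), i.e. 4KiB, 2MiB or 1GiB.
 */
#include <stdio.h>

int main(void)
{
	int level;

	for (level = 1; level <= 3; level++)
		printf("level %d PTE -> minimum unmap size %lu KiB\n",
		       level, 4UL << (9 * (level - 1)));
	return 0;
}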
5375 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5376 dma_addr_t iova)
5378 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5379 struct dma_pte *pte;
5380 int level = 0;
5381 u64 phys = 0;
5383 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5384 if (pte)
5385 phys = dma_pte_addr(pte);
5387 return phys;
5390 static inline bool scalable_mode_support(void)
5392 struct dmar_drhd_unit *drhd;
5393 struct intel_iommu *iommu;
5394 bool ret = true;
5396 rcu_read_lock();
5397 for_each_active_iommu(iommu, drhd) {
5398 if (!sm_supported(iommu)) {
5399 ret = false;
5400 break;
5403 rcu_read_unlock();
5405 return ret;
5408 static inline bool iommu_pasid_support(void)
5410 struct dmar_drhd_unit *drhd;
5411 struct intel_iommu *iommu;
5412 bool ret = true;
5414 rcu_read_lock();
5415 for_each_active_iommu(iommu, drhd) {
5416 if (!pasid_supported(iommu)) {
5417 ret = false;
5418 break;
5421 rcu_read_unlock();
5423 return ret;
5426 static bool intel_iommu_capable(enum iommu_cap cap)
5428 if (cap == IOMMU_CAP_CACHE_COHERENCY)
5429 return domain_update_iommu_snooping(NULL) == 1;
5430 if (cap == IOMMU_CAP_INTR_REMAP)
5431 return irq_remapping_enabled == 1;
5433 return false;
5436 static int intel_iommu_add_device(struct device *dev)
5438 struct intel_iommu *iommu;
5439 struct iommu_group *group;
5440 u8 bus, devfn;
5442 iommu = device_to_iommu(dev, &bus, &devfn);
5443 if (!iommu)
5444 return -ENODEV;
5446 iommu_device_link(&iommu->iommu, dev);
5448 group = iommu_group_get_for_dev(dev);
5450 if (IS_ERR(group))
5451 return PTR_ERR(group);
5453 iommu_group_put(group);
5454 return 0;
5457 static void intel_iommu_remove_device(struct device *dev)
5459 struct intel_iommu *iommu;
5460 u8 bus, devfn;
5462 iommu = device_to_iommu(dev, &bus, &devfn);
5463 if (!iommu)
5464 return;
5466 iommu_group_remove_device(dev);
5468 iommu_device_unlink(&iommu->iommu, dev);
5471 static void intel_iommu_get_resv_regions(struct device *device,
5472 struct list_head *head)
5474 struct iommu_resv_region *reg;
5475 struct dmar_rmrr_unit *rmrr;
5476 struct device *i_dev;
5477 int i;
5479 rcu_read_lock();
5480 for_each_rmrr_units(rmrr) {
5481 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5482 i, i_dev) {
5483 if (i_dev != device)
5484 continue;
5486 list_add_tail(&rmrr->resv->list, head);
5489 rcu_read_unlock();
5491 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5492 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5493 0, IOMMU_RESV_MSI);
5494 if (!reg)
5495 return;
5496 list_add_tail(&reg->list, head);
5499 static void intel_iommu_put_resv_regions(struct device *dev,
5500 struct list_head *head)
5502 struct iommu_resv_region *entry, *next;
5504 list_for_each_entry_safe(entry, next, head, list) {
5505 if (entry->type == IOMMU_RESV_MSI)
5506 kfree(entry);
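/*
 * Illustrative sketch (hypothetical caller, not part of this driver): the
 * reserved regions published by intel_iommu_get_resv_regions() above are
 * consumed through the generic IOMMU API, as below.  dump_resv_regions()
 * is a made-up function.
 */
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/list.h>

static void dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv);

	iommu_get_resv_regions(dev, &resv);
	list_for_each_entry(region, &resv, list)
		dev_info(dev, "reserved: start 0x%llx length 0x%zx type %d\n",
			 (unsigned long long)region->start, region->length,
			 region->type);
	iommu_put_resv_regions(dev, &resv);
}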
5510 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
5512 struct device_domain_info *info;
5513 struct context_entry *context;
5514 struct dmar_domain *domain;
5515 unsigned long flags;
5516 u64 ctx_lo;
5517 int ret;
5519 domain = get_valid_domain_for_dev(dev);
5520 if (!domain)
5521 return -EINVAL;
5523 spin_lock_irqsave(&device_domain_lock, flags);
5524 spin_lock(&iommu->lock);
5526 ret = -EINVAL;
5527 info = dev->archdata.iommu;
5528 if (!info || !info->pasid_supported)
5529 goto out;
5531 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5532 if (WARN_ON(!context))
5533 goto out;
5535 ctx_lo = context[0].lo;
5537 if (!(ctx_lo & CONTEXT_PASIDE)) {
5538 ctx_lo |= CONTEXT_PASIDE;
5539 context[0].lo = ctx_lo;
5540 wmb();
5541 iommu->flush.flush_context(iommu,
5542 domain->iommu_did[iommu->seq_id],
5543 PCI_DEVID(info->bus, info->devfn),
5544 DMA_CCMD_MASK_NOBIT,
5545 DMA_CCMD_DEVICE_INVL);
5548 /* Enable PASID support in the device, if it wasn't already */
5549 if (!info->pasid_enabled)
5550 iommu_enable_dev_iotlb(info);
5552 ret = 0;
5554 out:
5555 spin_unlock(&iommu->lock);
5556 spin_unlock_irqrestore(&device_domain_lock, flags);
5558 return ret;
5561 #ifdef CONFIG_INTEL_IOMMU_SVM
5562 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5564 struct intel_iommu *iommu;
5565 u8 bus, devfn;
5567 if (iommu_dummy(dev)) {
5568 dev_warn(dev,
5569 "No IOMMU translation for device; cannot enable SVM\n");
5570 return NULL;
5573 iommu = device_to_iommu(dev, &bus, &devfn);
5574 if (!iommu) {
5575 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
5576 return NULL;
5579 return iommu;
5581 #endif /* CONFIG_INTEL_IOMMU_SVM */
5583 static int intel_iommu_enable_auxd(struct device *dev)
5585 struct device_domain_info *info;
5586 struct intel_iommu *iommu;
5587 unsigned long flags;
5588 u8 bus, devfn;
5589 int ret;
5591 iommu = device_to_iommu(dev, &bus, &devfn);
5592 if (!iommu || dmar_disabled)
5593 return -EINVAL;
5595 if (!sm_supported(iommu) || !pasid_supported(iommu))
5596 return -EINVAL;
5598 ret = intel_iommu_enable_pasid(iommu, dev);
5599 if (ret)
5600 return -ENODEV;
5602 spin_lock_irqsave(&device_domain_lock, flags);
5603 info = dev->archdata.iommu;
5604 info->auxd_enabled = 1;
5605 spin_unlock_irqrestore(&device_domain_lock, flags);
5607 return 0;
5610 static int intel_iommu_disable_auxd(struct device *dev)
5612 struct device_domain_info *info;
5613 unsigned long flags;
5615 spin_lock_irqsave(&device_domain_lock, flags);
5616 info = dev->archdata.iommu;
5617 if (!WARN_ON(!info))
5618 info->auxd_enabled = 0;
5619 spin_unlock_irqrestore(&device_domain_lock, flags);
5621 return 0;
5625 * A PCI Express Designated Vendor-Specific Extended Capability (DVSEC) is
5626 * defined in section 3.7 of the Intel Scalable I/O Virtualization technical
5627 * specification so that system software and tools can detect endpoint devices
5628 * supporting Intel Scalable I/O Virtualization without a host driver dependency.
5630 * Returns the config-space offset of the matching extended capability
5631 * structure within the device's PCI configuration space, or 0 if the device
5632 * does not support it.
5634 static int siov_find_pci_dvsec(struct pci_dev *pdev)
5636 int pos;
5637 u16 vendor, id;
5639 pos = pci_find_next_ext_capability(pdev, 0, 0x23);
5640 while (pos) {
5641 pci_read_config_word(pdev, pos + 4, &vendor);
5642 pci_read_config_word(pdev, pos + 8, &id);
5643 if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
5644 return pos;
5646 pos = pci_find_next_ext_capability(pdev, pos, 0x23);
5649 return 0;
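/*
 * Illustrative sketch (hypothetical caller): how the helper above might be
 * used to report whether a device carries the Intel Scalable IOV DVSEC
 * (extended capability ID 0x23, vendor 0x8086, DVSEC ID 5, as checked by
 * siov_find_pci_dvsec()).  report_siov() is a made-up function.
 */
#include <linux/pci.h>

static void report_siov(struct pci_dev *pdev)
{
	int pos = siov_find_pci_dvsec(pdev);

	if (pos)
		pci_info(pdev, "Scalable IOV DVSEC at config offset 0x%x\n", pos);
	else
		pci_info(pdev, "no Scalable IOV DVSEC found\n");
}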
5652 static bool
5653 intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
5655 if (feat == IOMMU_DEV_FEAT_AUX) {
5656 int ret;
5658 if (!dev_is_pci(dev) || dmar_disabled ||
5659 !scalable_mode_support() || !iommu_pasid_support())
5660 return false;
5662 ret = pci_pasid_features(to_pci_dev(dev));
5663 if (ret < 0)
5664 return false;
5666 return !!siov_find_pci_dvsec(to_pci_dev(dev));
5669 return false;
5672 static int
5673 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
5675 if (feat == IOMMU_DEV_FEAT_AUX)
5676 return intel_iommu_enable_auxd(dev);
5678 return -ENODEV;
5681 static int
5682 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
5684 if (feat == IOMMU_DEV_FEAT_AUX)
5685 return intel_iommu_disable_auxd(dev);
5687 return -ENODEV;
5690 static bool
5691 intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
5693 struct device_domain_info *info = dev->archdata.iommu;
5695 if (feat == IOMMU_DEV_FEAT_AUX)
5696 return scalable_mode_support() && info && info->auxd_enabled;
5698 return false;
5701 static int
5702 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
5704 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5706 return dmar_domain->default_pasid > 0 ?
5707 dmar_domain->default_pasid : -EINVAL;
5710 const struct iommu_ops intel_iommu_ops = {
5711 .capable = intel_iommu_capable,
5712 .domain_alloc = intel_iommu_domain_alloc,
5713 .domain_free = intel_iommu_domain_free,
5714 .attach_dev = intel_iommu_attach_device,
5715 .detach_dev = intel_iommu_detach_device,
5716 .aux_attach_dev = intel_iommu_aux_attach_device,
5717 .aux_detach_dev = intel_iommu_aux_detach_device,
5718 .aux_get_pasid = intel_iommu_aux_get_pasid,
5719 .map = intel_iommu_map,
5720 .unmap = intel_iommu_unmap,
5721 .iova_to_phys = intel_iommu_iova_to_phys,
5722 .add_device = intel_iommu_add_device,
5723 .remove_device = intel_iommu_remove_device,
5724 .get_resv_regions = intel_iommu_get_resv_regions,
5725 .put_resv_regions = intel_iommu_put_resv_regions,
5726 .device_group = pci_device_group,
5727 .dev_has_feat = intel_iommu_dev_has_feat,
5728 .dev_feat_enabled = intel_iommu_dev_feat_enabled,
5729 .dev_enable_feat = intel_iommu_dev_enable_feat,
5730 .dev_disable_feat = intel_iommu_dev_disable_feat,
5731 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
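/*
 * Illustrative sketch (hypothetical caller, e.g. a VFIO-style user): the ops
 * above are never called directly; they are reached through the generic
 * IOMMU API as below.  map_one_page() is a made-up function and the buffer
 * management is trimmed to the bare minimum.
 */
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/sizes.h>

static int map_one_page(struct pci_dev *pdev, phys_addr_t pa, dma_addr_t iova)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&pci_bus_type);	/* -> intel_iommu_domain_alloc() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, &pdev->dev);	/* -> intel_iommu_attach_device() */
	if (ret)
		goto out_free;

	ret = iommu_map(domain, iova, pa, SZ_4K,	/* -> intel_iommu_map() */
			IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		iommu_unmap(domain, iova, SZ_4K);	/* -> intel_iommu_unmap() */

	iommu_detach_device(domain, &pdev->dev);	/* -> intel_iommu_detach_device() */
out_free:
	iommu_domain_free(domain);
	return ret;
}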
5734 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5736 /* G4x/GM45 integrated gfx dmar support is totally busted. */
5737 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
5738 dmar_map_gfx = 0;
5741 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5742 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5743 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5744 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5745 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5746 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5747 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5749 static void quirk_iommu_rwbf(struct pci_dev *dev)
5752 * Mobile 4 Series Chipset neglects to set RWBF capability,
5753 * but needs it. Same seems to hold for the desktop versions.
5755 pci_info(dev, "Forcing write-buffer flush capability\n");
5756 rwbf_quirk = 1;
5759 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5760 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5761 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5762 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5763 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5764 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5765 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5767 #define GGC 0x52
5768 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
5769 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5770 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
5771 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
5772 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5773 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5774 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5775 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
5777 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5779 unsigned short ggc;
5781 if (pci_read_config_word(dev, GGC, &ggc))
5782 return;
5784 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5785 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5786 dmar_map_gfx = 0;
5787 } else if (dmar_map_gfx) {
5788 /* we have to ensure the gfx device is idle before we flush */
5789 pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
5790 intel_iommu_strict = 1;
5793 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5794 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5795 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5796 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5798 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5799 ISOCH DMAR unit for the Azalia sound device, but not give it any
5800 TLB entries, which causes it to deadlock. Check for that. We do
5801 this in a function called from init_dmars(), instead of in a PCI
5802 quirk, because we don't want to print the obnoxious "BIOS broken"
5803 message if VT-d is actually disabled.
5805 static void __init check_tylersburg_isoch(void)
5807 struct pci_dev *pdev;
5808 uint32_t vtisochctrl;
5810 /* If there's no Azalia in the system anyway, forget it. */
5811 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5812 if (!pdev)
5813 return;
5814 pci_dev_put(pdev);
5816 /* System Management Registers. Might be hidden, in which case
5817 we can't do the sanity check. But that's OK, because the
5818 known-broken BIOSes _don't_ actually hide it, so far. */
5819 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5820 if (!pdev)
5821 return;
5823 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5824 pci_dev_put(pdev);
5825 return;
5828 pci_dev_put(pdev);
5830 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5831 if (vtisochctrl & 1)
5832 return;
5834 /* Drop all bits other than the number of TLB entries */
5835 vtisochctrl &= 0x1c;
5837 /* If we have the recommended number of TLB entries (16), fine. */
5838 if (vtisochctrl == 0x10)
5839 return;
5841 /* Zero TLB entries? You get to ride the short bus to school. */
5842 if (!vtisochctrl) {
5843 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5844 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5845 dmi_get_system_info(DMI_BIOS_VENDOR),
5846 dmi_get_system_info(DMI_BIOS_VERSION),
5847 dmi_get_system_info(DMI_PRODUCT_VERSION));
5848 iommu_identity_mapping |= IDENTMAP_AZALIA;
5849 return;
5852 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
5853 vtisochctrl);