/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
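/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * ~0xFFFUL has every bit from 12 upwards set, i.e. every power-of-two
 * size from 4KiB up is advertised.  A caller could test a candidate
 * mapping size against this bitmap roughly like so:
 *
 *	static bool example_size_supported(unsigned long size)
 *	{
 *		// non-zero, power of two, and an order of 4KiB
 *		return size && is_power_of_2(size) &&
 *		       (size & INTEL_IOMMU_PGSIZES);
 *	}
 *
 * e.g. 4KiB (0x1000), 8KiB and 2MiB all pass; 2KiB does not.
 */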
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
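/*
 * Editor's worked example (illustrative): a domain with agaw 2 walks
 * agaw_to_level(2) = 4 page-table levels and covers agaw_to_width(2) =
 * 30 + 2 * 9 = 48 bits of address space; going the other way,
 * width_to_agaw(48) = DIV_ROUND_UP(18, 9) = 2.  With 4KiB MM pages,
 * PAGE_SHIFT == VTD_PAGE_SHIFT, so dma_to_mm_pfn() and mm_to_dma_pfn()
 * are shifts by zero and the two PFN spaces coincide.
 */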
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
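/*
 * Editor's worked example (illustrative, derived from the setters
 * above): programming an entry for domain id 5, agaw 2, page table at
 * physical 0x12345000 leaves
 *	lo = 0x12345000 | (translation type << 2) | 1 (present)
 *	hi = (5 << 8) | 2 (address width)
 * matching the bit layout documented in the comment above.
 */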
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef	CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif
struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};
static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;
static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
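/*
 * Editor's usage note (illustrative): options combine with commas on
 * the kernel command line, as implied by the strcspn(str, ",") scan
 * above, e.g.
 *
 *	intel_iommu=on,strict,igfx_off
 *
 * enables VT-d, disables batched IOTLB flushing, and leaves the
 * integrated graphics device unmapped.
 */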
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
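/*
 * Editor's worked example (illustrative): with the default domain
 * width of 48, width_to_agaw(48) = 2, so hardware whose SAGAW field
 * has bit 2 set (4-level tables) uses agaw 2 directly; hardware
 * reporting only bit 1 (3-level, 39-bit) makes the loop above fall
 * back to agaw 1.
 */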
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask)
			break;
	}
	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_active_drhd_unit(drhd) {
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->busn_res.end >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
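/*
 * Editor's worked example (illustrative): for a pfn in a 4-level
 * (agaw 2) domain, the walk above indexes pfn_level_offset(pfn, 4..1),
 * i.e. bits 27-35, 18-26, 9-17 and finally 0-8 of the pfn -- nine bits
 * per level, as fixed by LEVEL_STRIDE and masked by LEVEL_MASK.
 */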
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
/* clear last level pte; a TLB flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
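/*
 * Editor's note (illustrative): the three invalidation granularities
 * nest -- a global flush invalidates everything, a domain-selective
 * flush only entries tagged with 'did', and a page-selective flush
 * only the addr/size_order window.  The IAIG/IIRG comparison above
 * catches hardware that silently performed a coarser flush than the
 * granularity that was requested.
 */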
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info || !info->dev)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
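/*
 * Editor's worked example (illustrative): flushing pages = 5 rounds up
 * to 8, so mask = ilog2(8) = 3 and the PSI invalidation covers a
 * naturally aligned 8-page (32KiB) window around addr; if the hardware
 * advertises a smaller maximum address mask, the code above falls back
 * to a domain-selective flush instead.
 */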
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i, count;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			count = --domain->iommu_count;
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
			if (count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	kfree(iommu->domains);
	kfree(iommu->domain_ids);
	iommu->domains = NULL;
	iommu->domain_ids = NULL;

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;

	return domain;
}
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
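/*
 * Editor's worked example (illustrative): a guest width of 48 has
 * (48 - 12) % 9 == 0 and is kept as-is, while 40 rounds up to
 * 40 + 9 - 1 = 48.  Page-table levels consume nine bits each above the
 * 12-bit page offset, so the adjusted width must land on such a
 * boundary.
 */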
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
			int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}
static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
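/*
 * Editor's worked example (illustrative, 4KiB pages): host_addr 0x1f00
 * with size 0x20 keeps offset 0xf00, and PAGE_ALIGN(0xf00 + 0x20) =
 * 0x1000, i.e. one VT-d page; host_addr 0x1ff0 with the same size
 * straddles a page boundary (0xff0 + 0x20 = 0x1010) and yields two.
 */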
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
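/*
 * Editor's worked example (illustrative): iov_pfn 0x200 | phy_pfn
 * 0x400 = 0x600, whose low nine bits are clear, so with >= 512 pages
 * and hardware support the loop promotes to level 2 (a 2MiB
 * superpage); either address misaligned by a single 4KiB page keeps
 * level 1.
 */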
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is large page*/
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				/* Ensure that old small page tables are removed to make room
				   for superpage, if they exist. */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->dev.archdata.iommu = NULL;
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the domain info.
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, uses it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		free_domain_mem(domain);
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		free_domain_mem(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, uses it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
	} else {
		list_add(&info->link, &domain->devices);
		list_add(&info->global, &device_domain_list);
		pdev->dev.archdata.iommu = info;
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}
static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address);
}
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		spin_lock_irqsave(&device_domain_lock, flags);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		return ret;
	}

	return 0;
}
static bool device_has_rmrr(struct pci_dev *dev)
{
	struct dmar_rmrr_unit *rmrr;
	int i;

	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			/*
			 * Return TRUE if this RMRR contains the device that
			 * is passed in.
			 */
			if (rmrr->devices[i] == dev)
				return true;
		}
	}
	return false;
}
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{

	/*
	 * We want to prevent any device associated with an RMRR from
	 * getting placed into the SI Domain. This is done because
	 * problems exist when devices are moved in and out of domains
	 * and their respective RMRR info is lost. We exempt USB devices
	 * from this process due to their usage of RMRRs that are known
	 * to not be needed after BIOS hand-off to OS.
	 */
	if (device_has_rmrr(pdev) &&
	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
		return 0;

	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = pdev->dma_mask;

		if (pdev->dev.coherent_dma_mask &&
		    pdev->dev.coherent_dma_mask < dma_mask)
			dma_mask = pdev->dev.coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(&pdev->dev);
	}

	return 1;
}
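
/*
 * Illustrative sketch, compiled out with #if 0 and not part of the
 * driver: the 64-bit check above, in isolation.  A device advertising
 * only a 32-bit dma_mask on a machine with more than 4GiB of RAM fails
 * the test (0xffffffff < dma_get_required_mask()), so it is kept out
 * of the identity domain; a 64-bit capable device passes.  The
 * function name is hypothetical.
 */
#if 0
static bool example_can_identity_map(struct pci_dev *pdev)
{
	u64 mask = pdev->dma_mask;

	/* The effective mask is the smaller of streaming and coherent. */
	if (pdev->dev.coherent_dma_mask && pdev->dev.coherent_dma_mask < mask)
		mask = pdev->dev.coherent_dma_mask;

	return mask >= dma_get_required_mask(&pdev->dev);
}
#endif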
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			ret = domain_add_dev_info(si_domain, pdev,
					     hw ? CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (ret) {
				/* device not associated with an iommu */
				if (ret == -ENODEV)
					continue;
				return ret;
			}
			pr_info("IOMMU: %s identity mapping for device %s\n",
				hw ? "hardware" : "software", pci_name(pdev));
		}
	}

	return 0;
}
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
			    IOMMU_UNITS_SUPPORTED);
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto free_g_iommus;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto free_iommu;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMU's. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto free_iommu;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_active_iommu(iommu, drhd) {
		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_active_iommu(iommu, drhd) {
		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto free_iommu;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOS lists non-exist devices in DMAR
			 * table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto free_iommu;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto free_iommu;

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;

free_iommu:
	for_each_active_iommu(iommu, drhd)
		free_dmar_iommu(iommu);
	kfree(deferred_flush);
free_g_iommus:
	kfree(g_iommus);
error:
	return ret;
}
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}
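
/*
 * Illustrative sketch, compiled out with #if 0 and not part of the
 * driver: how a caller sees the allocation policy above.  Unless
 * dmar_forcedac is set, even a 64-bit capable device first gets a
 * sub-4GiB IOVA; only if that range is exhausted does the allocator
 * retry with the device's full mask.  The wrapper name is
 * hypothetical.
 */
#if 0
static struct iova *example_alloc(struct device *dev,
				  struct dmar_domain *domain,
				  unsigned long nrpages)
{
	/* to_pci_dev(dev)->dma_mask may well be DMA_BIT_MASK(64) here. */
	return intel_alloc_iova(dev, domain, nrpages,
				to_pci_dev(dev)->dma_mask);
}
#endif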
static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}
static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(!dev_is_pci(dev)))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		else {
			/*
			 * 32 bit DMA is removed from si_domain and fall back
			 * to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * In case of a detached 64 bit DMA device from vm, the device
		 * is put into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be partial page, we should map the whole
	 * page.  Note: if two part of one page are separately mapped, we
	 * might have two guest_addr mapping to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
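
/*
 * Illustrative sketch, compiled out with #if 0 and not part of the
 * driver: the driver-side view.  A PCI driver never calls
 * intel_map_page() directly; it uses the generic DMA API, which
 * dispatches through dma_ops (set to intel_dma_ops in
 * intel_iommu_init() below).  The function itself is hypothetical;
 * the dma_* calls are the standard DMA API.
 */
#if 0
static int example_driver_map(struct pci_dev *pdev,
			      struct page *page, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_page(&pdev->dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;
	/* ... program the device with 'handle' and run the transfer ... */
	dma_unmap_page(&pdev->dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif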
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
				iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
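
/*
 * Illustrative sketch, compiled out with #if 0 and not part of the
 * driver: the lazy-unmap pattern above in miniature.  An unmap only
 * queues the IOVA; the queue is drained by flush_unmaps() (one IOTLB
 * flush covering many unmaps) when it hits the high-water mark or when
 * the 10ms unmap_timer fires, which is where the cpu-time saving noted
 * in intel_unmap_page() comes from.  The function name is hypothetical.
 */
#if 0
static void example_lazy_unmap(struct dmar_domain *dom, struct iova *iova)
{
	/* Queue now... */
	add_unmap(dom, iova);
	/* ...flush_unmaps() later invalidates and frees in one batch. */
}
#endif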
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(hwdev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
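
/*
 * Illustrative sketch, compiled out with #if 0 and not part of the
 * driver: driver-side scatter-gather mapping through the generic DMA
 * API, which lands in intel_map_sg() above.  The returned count may be
 * smaller than nelems, and the driver must iterate the mapped list
 * with sg_dma_address()/sg_dma_len().  The function name is
 * hypothetical.
 */
#if 0
static int example_driver_map_sg(struct pci_dev *pdev,
				 struct scatterlist *sgl, int nelems)
{
	int i, count;
	struct scatterlist *sg;

	count = dma_map_sg(&pdev->dev, sgl, nelems, DMA_FROM_DEVICE);
	if (!count)
		return -ENOMEM;

	for_each_sg(sgl, sg, count, i) {
		/* program one hardware descriptor per mapped entry */
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		(void)addr; (void)len;
	}

	dma_unmap_sg(&pdev->dev, sgl, nelems, DMA_FROM_DEVICE);
	return 0;
}
#endif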
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
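
/*
 * Illustrative sketch, compiled out with #if 0 and not part of the
 * driver: how the ops table above is reached.  Once intel_iommu_init()
 * sets dma_ops = &intel_dma_ops, an ordinary dma_alloc_coherent() call
 * from any PCI driver ends up in intel_alloc_coherent().  The function
 * name is hypothetical.
 */
#if 0
static void *example_alloc_ring(struct pci_dev *pdev, size_t size,
				dma_addr_t *dma)
{
	/* Dispatches through dma_ops->alloc == intel_alloc_coherent. */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}
#endif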
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		int i;
		if (drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for (i = 0; i < drhd->devices_cnt; i++) {
				if (!drhd->devices[i])
					continue;
				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
			}
		}
	}
}
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		if (iommu_enable_translation(iommu))
			return 1;
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}
static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_SUSPEND */
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	return dmar_parse_dev_scope((void *)(rmrr + 1),
				    ((void *)rmrr) + rmrr->header.length,
				    &rmrru->devices_cnt, &rmrru->devices,
				    rmrr->segment);
}
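
/*
 * Illustrative sketch, compiled out with #if 0 and not part of the
 * driver: the pointer arithmetic used in rmrr_parse_dev() above.  The
 * ACPI device-scope entries are laid out immediately after the fixed
 * acpi_dmar_reserved_memory header, so "(void *)(rmrr + 1)" is the
 * first scope entry and "(void *)rmrr + rmrr->header.length" is one
 * past the last.  The function name is hypothetical.
 */
#if 0
static void example_scope_bounds(struct acpi_dmar_reserved_memory *rmrr,
				 void **start, void **end)
{
	*start = (void *)(rmrr + 1);		     /* after fixed header */
	*end = (void *)rmrr + rmrr->header.length;   /* length-delimited */
}
#endif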
static LIST_HEAD(dmar_atsr_units);

int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	return dmar_parse_dev_scope((void *)(atsr + 1),
				    (void *)atsr + atsr->header.length,
				    &atsru->devices_cnt, &atsru->devices,
				    atsr->segment);
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}
static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
int __init dmar_parse_rmrr_atsr_dev(void)
{
	struct dmar_rmrr_unit *rmrr;
	struct dmar_atsr_unit *atsr;
	int ret = 0;

	list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	list_for_each_entry(atsr, &dmar_atsr_units, list) {
		ret = atsr_parse_dev(atsr);
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * Here we only respond to action of unbound device from driver.
 *
 * Added device is not attached to its DMAR domain here yet. That will happen
 * when mapping the device to iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;

	if (iommu_no_mapping(dev))
		return 0;

	domain = find_domain(pdev);
	if (!domain)
		return 0;

	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
		domain_remove_one_dev_info(domain, pdev);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
		    list_empty(&domain->devices))
			domain_exit(domain);
	}

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};
int __init intel_iommu_init(void)
{
	int ret = 0;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		return -ENODEV;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		return -ENODEV;
	}

	if (no_iommu || dmar_disabled)
		return -ENODEV;

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_mempool;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		goto out_free_reserved_range;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_mempool:
	iommu_exit_mempool();
	intel_iommu_free_dmars();
	return ret;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->segment == pci_domain_nr(pdev->bus) &&
		    info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there is no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine, it won't be set in context */
static atomic_t vm_domid = ATOMIC_INIT(0);

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = atomic_inc_return(&vm_domid);
	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;
	domain->nid = -1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_active_iommu(iommu, drhd) {
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(i, iommu->domain_ids, ndomains) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so dont process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}
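
/*
 * Illustrative sketch, compiled out with #if 0 and not part of the
 * driver: the page rounding that intel_iommu_map() above relies on.
 * A request with hpa = 0x1ff0 and size = 0x20 straddles a 4KiB
 * boundary, so despite its tiny size it occupies two VT-d pages;
 * aligned_nrpages(hpa, size) accounts for the low bits of hpa before
 * rounding up.  The helper below spells out an equivalent computation.
 */
#if 0
static unsigned long example_nrpages(phys_addr_t hpa, size_t size)
{
	/* offset-into-page plus size, rounded up to whole VT-d pages */
	return ALIGN((hpa & ~VTD_PAGE_MASK) + size, VTD_PAGE_SIZE)
		>> VTD_PAGE_SHIFT;
}
#endif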
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

static int intel_iommu_add_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge, *dma_pdev = NULL;
	struct iommu_group *group;
	int ret;

	if (!device_to_iommu(pci_domain_nr(pdev->bus),
			     pdev->bus->number, pdev->devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge))
			dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
		if (!dma_pdev)
			dma_pdev = pci_dev_get(bridge);
	} else
		dma_pdev = pci_dev_get(pdev);

	/* Account for quirked devices */
	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

	/*
	 * If it's a multifunction device that does not support our
	 * required ACS flags, add to the same group as lowest numbered
	 * function that also does not support the required ACS flags.
	 */
	if (dma_pdev->multifunction &&
	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
		u8 i, slot = PCI_SLOT(dma_pdev->devfn);

		for (i = 0; i < 8; i++) {
			struct pci_dev *tmp;

			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
			if (!tmp)
				continue;

			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
				swap_pci_ref(&dma_pdev, tmp);
				break;
			}
			pci_dev_put(tmp);
		}
	}

	/*
	 * Devices on the root bus go through the iommu.  If that's not us,
	 * find the next upstream device and test ACS up to the root bus.
	 * Finding the next device may require skipping virtual buses.
	 */
	while (!pci_is_root_bus(dma_pdev->bus)) {
		struct pci_bus *bus = dma_pdev->bus;

		while (!bus->self) {
			if (!pci_is_root_bus(bus))
				bus = bus->parent;
			else
				goto root_bus;
		}

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
	}

root_bus:
	group = iommu_group_get(&dma_pdev->dev);
	pci_dev_put(dma_pdev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

static void intel_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
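
/*
 * Illustrative sketch, compiled out with #if 0 and not part of the
 * driver: the consumer side of the ops table above.  A user such as
 * VFIO or KVM device assignment goes through the generic IOMMU API,
 * which calls back into the intel_iommu_* functions registered here.
 * The function name is hypothetical; error handling is abbreviated.
 */
#if 0
static int example_assign(struct device *dev, unsigned long iova,
			  phys_addr_t paddr, size_t size)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc(&pci_bus_type); /* -> intel_iommu_domain_init */
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, dev);	/* -> intel_iommu_attach_device */
	if (ret)
		goto out_free;

	ret = iommu_map(dom, iova, paddr, size,	/* -> intel_iommu_map */
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... use the domain ... */

	iommu_unmap(dom, iova, size);		/* -> intel_iommu_unmap */
out_detach:
	iommu_detach_device(dom, dev);
out_free:
	iommu_domain_free(dom);
	return ret;
}
#endif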
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}