/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_BRIDGE_HOST_DEVICE(pdev) \
			((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
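
/*
 * For example, with the default 48-bit guest address width and 4KiB VT-d
 * pages, __DOMAIN_MAX_PFN(48) is 2^36 - 1.  On a 64-bit kernel that value
 * is used as-is; on a 32-bit kernel DOMAIN_MAX_PFN() clamps it to ULONG_MAX
 * (2^32 - 1), so PFN arithmetic below never overflows an unsigned long.
 */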
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}
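
/*
 * Each page-table level indexes LEVEL_STRIDE (9) bits of the VT-d pfn, so
 * for example level_to_offset_bits(2) == 9 and pfn_level_offset(pfn, 2)
 * selects bits 9-17 of the pfn.  A domain with agaw 2 (48-bit width)
 * therefore walks agaw_to_level(2) == 4 levels of 512-entry tables.
 */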

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
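
/*
 * On x86 PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so these conversions are no-op
 * shifts; they only change the value when the CPU page size is larger than
 * the 4KiB VT-d page size.
 */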

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
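
/*
 * first_pte_in_page() relies on page-table pages being exactly one VT-d
 * page: a pte pointer whose low VTD_PAGE_SHIFT bits are all zero is entry 0
 * of its 512-entry table, which is where per-page cache-flush boundaries
 * fall in the mapping loops below.
 */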

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_DMAR_DEFAULT_ON */

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus: use a default agaw, and fall back
 * to a smaller supported agaw for iommus that don't support the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
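
/*
 * With DEFAULT_DOMAIN_ADDRESS_WIDTH (48), width_to_agaw(48) == 2, so the
 * search starts at the SAGAW bit for a 4-level table and walks downward
 * until it finds an address width the hardware actually reports in its
 * capability register.
 */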

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask)
			break;
	}
	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	ret = 0;
	if (context)
		ret = context_present(&context[devfn]);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);
			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
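
/*
 * The walk above is top-down: with a 4-level table a request for
 * target_level 1 visits levels 4, 3, 2 and 1 in turn, allocating any
 * missing intermediate table with cmpxchg64() so a concurrent mapper
 * racing on the same slot simply frees its duplicate page and reuses the
 * winner's.
 */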

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a TLB flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}

/* free page table pages; last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires the page count to be 2 ^ x, and the base address to be
	 * naturally aligned to that size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present
	 * require a flush. However, the device IOTLB doesn't need to be
	 * flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
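
/*
 * Example: invalidating 5 pages rounds up to a power of two, giving
 * mask == 3 (2^3 == 8 pages); the base address programmed alongside that
 * mask must be naturally aligned to the 8-page granule, per the
 * page-selective invalidation rule noted above.
 */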

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
			ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
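
/*
 * Example: host_addr 0x1200 with size 0x3000 leaves an in-page offset of
 * 0x200, and PAGE_ALIGN(0x200 + 0x3000) >> VTD_PAGE_SHIFT == 4, so four
 * 4KiB VT-d pages are reserved for the mapping.
 */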

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
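
/*
 * Example: if both iov_pfn and phy_pfn are multiples of 512 (2MiB aligned),
 * at least 512 pages are being mapped, and the domain reports superpage
 * support, the returned level is 2 and a single 2MiB PTE can replace 512
 * individual 4KiB PTEs.
 */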

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1)
				pteval |= DMA_PTE_LARGE_PAGE;
			else
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the device_domain_info.
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		free_domain_mem(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);

static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		free_devinfo_mem(info);
		return ret;
	}

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
        if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
                return 1;

        if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
                return 1;

        if (!(iommu_identity_mapping & IDENTMAP_ALL))
                return 0;

        /*
         * We want to start off with all devices in the 1:1 domain, and
         * take them out later if we find they can't access all of memory.
         *
         * However, we can't do this for PCI devices behind bridges,
         * because all PCI devices behind the same bridge will end up
         * with the same source-id on their transactions.
         *
         * Practically speaking, we can't change things around for these
         * devices at run-time, because we can't be sure there'll be no
         * DMA transactions in flight for any of their siblings.
         *
         * So PCI devices (unless they're on the root bus) as well as
         * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
         * the 1:1 domain, just in _case_ one of their siblings turns out
         * not to be able to map all of memory.
         */
        if (!pci_is_pcie(pdev)) {
                if (!pci_is_root_bus(pdev->bus))
                        return 0;
                if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
                        return 0;
        } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
                return 0;

        /*
         * At boot time, we don't yet know if devices will be 64-bit capable.
         * Assume that they will -- if they turn out not to be, then we can
         * take them out of the 1:1 domain later.
         */
        if (!startup) {
                /*
                 * If the device's dma_mask is less than the system's memory
                 * size then this is not a candidate for identity mapping.
                 */
                u64 dma_mask = pdev->dma_mask;

                if (pdev->dev.coherent_dma_mask &&
                    pdev->dev.coherent_dma_mask < dma_mask)
                        dma_mask = pdev->dev.coherent_dma_mask;

                return dma_mask >= dma_get_required_mask(&pdev->dev);
        }

        return 1;
}
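/*
 * Walk all PCI devices at boot and put every eligible one into si_domain,
 * using pass-through context entries when the hardware supports them and
 * multi-level translation of the 1:1 map otherwise.
 */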
static int __init iommu_prepare_static_identity_mapping(int hw)
{
        struct pci_dev *pdev = NULL;
        int ret;

        ret = si_domain_init(hw);
        if (ret)
                return -EFAULT;

        for_each_pci_dev(pdev) {
                /* Skip Host/PCI Bridge devices */
                if (IS_BRIDGE_HOST_DEVICE(pdev))
                        continue;
                if (iommu_should_identity_map(pdev, 1)) {
                        printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
                               hw ? "hardware" : "software", pci_name(pdev));

                        ret = domain_add_dev_info(si_domain, pdev,
                                                  hw ? CONTEXT_TT_PASS_THROUGH :
                                                       CONTEXT_TT_MULTI_LEVEL);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
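/*
 * init_dmars() brings every DMAR unit from whatever state firmware left it
 * in to a known-good one: allocate per-IOMMU domain and root tables, pick
 * register-based or queued invalidation, set up the identity/pass-through
 * mappings and RMRRs, and finally enable translation with fault reporting.
 */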
static int __init init_dmars(void)
{
        struct dmar_drhd_unit *drhd;
        struct dmar_rmrr_unit *rmrr;
        struct pci_dev *pdev;
        struct intel_iommu *iommu;
        int i, ret;

        /*
         * for each drhd
         *    allocate root
         *    initialize and program root entry to not present
         * endfor
         */
        for_each_drhd_unit(drhd) {
                g_num_of_iommus++;
                /*
                 * lock not needed as this is only incremented in the single
                 * threaded kernel __init code path all other access are read
                 * only
                 */
        }

        g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
                           GFP_KERNEL);
        if (!g_iommus) {
                printk(KERN_ERR "Allocating global iommu array failed\n");
                ret = -ENOMEM;
                goto error;
        }

        deferred_flush = kzalloc(g_num_of_iommus *
                sizeof(struct deferred_flush_tables), GFP_KERNEL);
        if (!deferred_flush) {
                ret = -ENOMEM;
                goto error;
        }

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;
                g_iommus[iommu->seq_id] = iommu;

                ret = iommu_init_domains(iommu);
                if (ret)
                        goto error;

                /*
                 * TBD:
                 * we could share the same root & context tables
                 * among all IOMMU's. Need to Split it later.
                 */
                ret = iommu_alloc_root_entry(iommu);
                if (ret) {
                        printk(KERN_ERR "IOMMU: allocate root entry failed\n");
                        goto error;
                }
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
        }

        /*
         * Start from the sane iommu hardware state.
         */
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;

                /*
                 * If the queued invalidation is already initialized by us
                 * (for example, while enabling interrupt-remapping) then
                 * we got the things already rolling from a sane state.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(-1, iommu);
                /*
                 * Disable queued invalidation if supported and already enabled
                 * before OS handover.
                 */
                dmar_disable_qi(iommu);
        }

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;

                if (dmar_enable_qi(iommu)) {
                        /*
                         * Queued Invalidate not enabled, use Register Based
                         * Invalidate
                         */
                        iommu->flush.flush_context = __iommu_flush_context;
                        iommu->flush.flush_iotlb = __iommu_flush_iotlb;
                        printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
                               "invalidation\n",
                               iommu->seq_id,
                               (unsigned long long)drhd->reg_base_addr);
                } else {
                        iommu->flush.flush_context = qi_flush_context;
                        iommu->flush.flush_iotlb = qi_flush_iotlb;
                        printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
                               "invalidation\n",
                               iommu->seq_id,
                               (unsigned long long)drhd->reg_base_addr);
                }
        }

        if (iommu_pass_through)
                iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_DMAR_BROKEN_GFX_WA
        iommu_identity_mapping |= IDENTMAP_GFX;
#endif

        check_tylersburg_isoch();

        /*
         * If pass through is not set or not enabled, setup context entries for
         * identity mappings for rmrr, gfx, and isa and may fall back to static
         * identity mapping if iommu_identity_mapping is set.
         */
        if (iommu_identity_mapping) {
                ret = iommu_prepare_static_identity_mapping(hw_pass_through);
                if (ret) {
                        printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
                        goto error;
                }
        }
        /*
         * For each rmrr
         *   for each dev attached to rmrr
         *   do
         *     locate drhd for dev, alloc domain for dev
         *     allocate free domain
         *     allocate page table entries for rmrr
         *     if context not allocated for bus
         *           allocate and init context
         *           set present in root table for this bus
         *     init context with domain, translation etc
         *    endfor
         * endfor
         */
        printk(KERN_INFO "IOMMU: Setting RMRR:\n");
        for_each_rmrr_units(rmrr) {
                for (i = 0; i < rmrr->devices_cnt; i++) {
                        pdev = rmrr->devices[i];
                        /*
                         * some BIOS lists non-exist devices in DMAR
                         * table.
                         */
                        if (!pdev)
                                continue;
                        ret = iommu_prepare_rmrr_dev(rmrr, pdev);
                        if (ret)
                                printk(KERN_ERR
                                       "IOMMU: mapping reserved region failed\n");
                }
        }

        iommu_prepare_isa();

        /*
         * for each drhd
         *   enable fault log
         *   global invalidate context cache
         *   global invalidate iotlb
         *   enable translation
         */
        for_each_drhd_unit(drhd) {
                if (drhd->ignored) {
                        /*
                         * we always have to disable PMRs or DMA may fail on
                         * this device
                         */
                        if (force_on)
                                iommu_disable_protect_mem_regions(drhd->iommu);
                        continue;
                }
                iommu = drhd->iommu;

                iommu_flush_write_buffer(iommu);

                ret = dmar_set_interrupt(iommu);
                if (ret)
                        goto error;

                iommu_set_root_entry(iommu);

                iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

                ret = iommu_enable_translation(iommu);
                if (ret)
                        goto error;

                iommu_disable_protect_mem_regions(iommu);
        }

        return 0;
error:
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                iommu = drhd->iommu;
                free_iommu(iommu);
        }
        kfree(g_iommus);
        return ret;
}
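/*
 * Everything below implements the streaming DMA API on top of the domain
 * infrastructure above: allocate an IOVA, map it onto the buffer's physical
 * pages, and flush the IOTLB (or the write buffer) as the hardware requires.
 */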
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
                                     struct dmar_domain *domain,
                                     unsigned long nrpages, uint64_t dma_mask)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct iova *iova = NULL;

        /* Restrict dma_mask to the width that the iommu can handle */
        dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

        if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
                /*
                 * First try to allocate an io virtual address in
                 * DMA_BIT_MASK(32) and if that fails then try allocating
                 * from higher range
                 */
                iova = alloc_iova(&domain->iovad, nrpages,
                                  IOVA_PFN(DMA_BIT_MASK(32)), 1);
                if (iova)
                        return iova;
        }
        iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
        if (unlikely(!iova)) {
                printk(KERN_ERR "Allocating %ld-page iova for %s failed",
                       nrpages, pci_name(pdev));
                return NULL;
        }

        return iova;
}
static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
        struct dmar_domain *domain;
        int ret;

        domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain) {
                printk(KERN_ERR "Allocating domain for %s failed",
                       pci_name(pdev));
                return NULL;
        }

        /* make sure context mapping is ok */
        if (unlikely(!domain_context_mapped(pdev))) {
                ret = domain_context_mapping(domain, pdev,
                                             CONTEXT_TT_MULTI_LEVEL);
                if (ret) {
                        printk(KERN_ERR "Domain context map for %s failed",
                               pci_name(pdev));
                        return NULL;
                }
        }

        return domain;
}
static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
        struct device_domain_info *info;

        /* No lock here, assumes no domain exit in normal case */
        info = dev->dev.archdata.iommu;
        if (likely(info))
                return info->domain;

        return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
        return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
/* Check if the pdev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
        struct pci_dev *pdev;
        int found;

        if (unlikely(dev->bus != &pci_bus_type))
                return 1;

        pdev = to_pci_dev(dev);
        if (iommu_dummy(pdev))
                return 1;

        if (!iommu_identity_mapping)
                return 0;

        found = identity_mapping(pdev);
        if (found) {
                if (iommu_should_identity_map(pdev, 0))
                        return 1;
                /*
                 * 32 bit DMA is removed from si_domain and fall back
                 * to non-identity mapping.
                 */
                domain_remove_one_dev_info(si_domain, pdev);
                printk(KERN_INFO "32bit %s uses non-identity mapping\n",
                       pci_name(pdev));
                return 0;
        } else {
                /*
                 * In case of a detached 64 bit DMA device from vm, the device
                 * is put into si_domain for identity mapping.
                 */
                if (iommu_should_identity_map(pdev, 0)) {
                        int ret;
                        ret = domain_add_dev_info(si_domain, pdev,
                                                  hw_pass_through ?
                                                  CONTEXT_TT_PASS_THROUGH :
                                                  CONTEXT_TT_MULTI_LEVEL);
                        if (!ret) {
                                printk(KERN_INFO "64bit %s uses identity mapping\n",
                                       pci_name(pdev));
                                return 1;
                        }
                }
        }

        return 0;
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
                                     size_t size, int dir, u64 dma_mask)
{
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
        phys_addr_t start_paddr;
        struct iova *iova;
        int prot = 0;
        int ret;
        struct intel_iommu *iommu;
        unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

        BUG_ON(dir == DMA_NONE);

        if (iommu_no_mapping(hwdev))
                return paddr;

        domain = get_valid_domain_for_dev(pdev);
        if (!domain)
                return 0;

        iommu = domain_get_iommu(domain);
        size = aligned_nrpages(paddr, size);

        iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
        if (!iova)
                goto error;

        /*
         * Check if DMAR supports zero-length reads on write only
         * mappings..
         */
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
                        !cap_zlr(iommu->cap))
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
        /*
         * paddr - (paddr + size) might be partial page, we should map the whole
         * page.  Note: if two part of one page are separately mapped, we
         * might have two guest_addr mapping to the same host paddr, but this
         * is not a big problem
         */
        ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
                                 mm_to_dma_pfn(paddr_pfn), size, prot);
        if (ret)
                goto error;

        /* it's a non-present to present mapping. Only flush if caching mode */
        if (cap_caching_mode(iommu->cap))
                iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
        else
                iommu_flush_write_buffer(iommu);

        start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
        start_paddr += paddr & ~PAGE_MASK;
        return start_paddr;

error:
        if (iova)
                __free_iova(&domain->iovad, iova);
        printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
               pci_name(pdev), size, (unsigned long long)paddr, dir);
        return 0;
}
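/*
 * dma_map_ops.map_page hook: translate the page/offset pair into a physical
 * address and let __intel_map_single() do the IOVA allocation and mapping.
 */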
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        return __intel_map_single(dev, page_to_phys(page) + offset, size,
                                  dir, to_pci_dev(dev)->dma_mask);
}
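/*
 * Deferred unmap path.  Instead of invalidating the IOTLB for every single
 * unmap, freed IOVAs are parked in the per-IOMMU deferred_flush tables and
 * released in batches, either when HIGH_WATER_MARK entries have queued up
 * or when the 10ms unmap_timer fires.
 */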
static void flush_unmaps(void)
{
        int i, j;

        timer_on = 0;

        /* just flush them all */
        for (i = 0; i < g_num_of_iommus; i++) {
                struct intel_iommu *iommu = g_iommus[i];
                if (!iommu)
                        continue;

                if (!deferred_flush[i].next)
                        continue;

                /* In caching mode, global flushes turn emulation expensive */
                if (!cap_caching_mode(iommu->cap))
                        iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                                 DMA_TLB_GLOBAL_FLUSH);
                for (j = 0; j < deferred_flush[i].next; j++) {
                        unsigned long mask;
                        struct iova *iova = deferred_flush[i].iova[j];
                        struct dmar_domain *domain = deferred_flush[i].domain[j];

                        /* On real hardware multiple invalidations are expensive */
                        if (cap_caching_mode(iommu->cap))
                                iommu_flush_iotlb_psi(iommu, domain->id,
                                        iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
                        else {
                                mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
                                iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
                                        (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
                        }
                        __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
                }
                deferred_flush[i].next = 0;
        }

        list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&async_umap_flush_lock, flags);
        flush_unmaps();
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
        unsigned long flags;
        int next, iommu_id;
        struct intel_iommu *iommu;

        spin_lock_irqsave(&async_umap_flush_lock, flags);
        if (list_size == HIGH_WATER_MARK)
                flush_unmaps();

        iommu = domain_get_iommu(dom);
        iommu_id = iommu->seq_id;

        next = deferred_flush[iommu_id].next;
        deferred_flush[iommu_id].domain[next] = dom;
        deferred_flush[iommu_id].iova[next] = iova;
        deferred_flush[iommu_id].next++;

        if (!timer_on) {
                mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
                timer_on = 1;
        }
        list_size++;
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
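/*
 * dma_map_ops.unmap_page hook: tear down the PTEs for the IOVA range and
 * either flush synchronously (intel_iommu_strict) or queue the IOVA for the
 * batched flush above.
 */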
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             struct dma_attrs *attrs)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;
        unsigned long start_pfn, last_pfn;
        struct iova *iova;
        struct intel_iommu *iommu;

        if (iommu_no_mapping(dev))
                return;

        domain = find_domain(pdev);
        BUG_ON(!domain);

        iommu = domain_get_iommu(domain);

        iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
        if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
                      (unsigned long long)dev_addr))
                return;

        start_pfn = mm_to_dma_pfn(iova->pfn_lo);
        last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

        pr_debug("Device %s unmapping: pfn %lx-%lx\n",
                 pci_name(pdev), start_pfn, last_pfn);

        /* clear the whole page */
        dma_pte_clear_range(domain, start_pfn, last_pfn);

        /* free page tables */
        dma_pte_free_pagetable(domain, start_pfn, last_pfn);

        if (intel_iommu_strict) {
                iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
                                      last_pfn - start_pfn + 1, 0);
                /* free iova */
                __free_iova(&domain->iovad, iova);
        } else {
                add_unmap(domain, iova);
                /*
                 * queue up the release of the unmap to save the 1/6th of the
                 * cpu used up by the iotlb flush operation...
                 */
        }
}
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags)
{
        void *vaddr;
        int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        if (!iommu_no_mapping(hwdev))
                flags &= ~(GFP_DMA | GFP_DMA32);
        else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
                if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
                        flags |= GFP_DMA;
                else
                        flags |= GFP_DMA32;
        }

        vaddr = (void *)__get_free_pages(flags, order);
        if (!vaddr)
                return NULL;
        memset(vaddr, 0, size);

        *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
                                         DMA_BIDIRECTIONAL,
                                         hwdev->coherent_dma_mask);
        if (*dma_handle)
                return vaddr;
        free_pages((unsigned long)vaddr, order);
        return NULL;
}
static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                                dma_addr_t dma_handle)
{
        int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
        free_pages((unsigned long)vaddr, order);
}
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
                           int nelems, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
        unsigned long start_pfn, last_pfn;
        struct iova *iova;
        struct intel_iommu *iommu;

        if (iommu_no_mapping(hwdev))
                return;

        domain = find_domain(pdev);
        BUG_ON(!domain);

        iommu = domain_get_iommu(domain);

        iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
        if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
                      (unsigned long long)sglist[0].dma_address))
                return;

        start_pfn = mm_to_dma_pfn(iova->pfn_lo);
        last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

        /* clear the whole page */
        dma_pte_clear_range(domain, start_pfn, last_pfn);

        /* free page tables */
        dma_pte_free_pagetable(domain, start_pfn, last_pfn);

        if (intel_iommu_strict) {
                iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
                                      last_pfn - start_pfn + 1, 0);
                /* free iova */
                __free_iova(&domain->iovad, iova);
        } else {
                add_unmap(domain, iova);
                /*
                 * queue up the release of the unmap to save the 1/6th of the
                 * cpu used up by the iotlb flush operation...
                 */
        }
}
static int intel_nontranslate_map_sg(struct device *hddev,
        struct scatterlist *sglist, int nelems, int dir)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nelems, i) {
                BUG_ON(!sg_page(sg));
                sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
                sg->dma_length = sg->length;
        }
        return nelems;
}
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
                        enum dma_data_direction dir, struct dma_attrs *attrs)
{
        int i;
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
        size_t size = 0;
        int prot = 0;
        struct iova *iova = NULL;
        int ret;
        struct scatterlist *sg;
        unsigned long start_vpfn;
        struct intel_iommu *iommu;

        BUG_ON(dir == DMA_NONE);
        if (iommu_no_mapping(hwdev))
                return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

        domain = get_valid_domain_for_dev(pdev);
        if (!domain)
                return 0;

        iommu = domain_get_iommu(domain);

        for_each_sg(sglist, sg, nelems, i)
                size += aligned_nrpages(sg->offset, sg->length);

        iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
                                pdev->dma_mask);
        if (!iova) {
                sglist->dma_length = 0;
                return 0;
        }

        /*
         * Check if DMAR supports zero-length reads on write only
         * mappings..
         */
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
                        !cap_zlr(iommu->cap))
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;

        start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

        ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
        if (unlikely(ret)) {
                /* clear the page */
                dma_pte_clear_range(domain, start_vpfn,
                                    start_vpfn + size - 1);
                /* free page tables */
                dma_pte_free_pagetable(domain, start_vpfn,
                                       start_vpfn + size - 1);
                /* free iova */
                __free_iova(&domain->iovad, iova);
                return 0;
        }

        /* it's a non-present to present mapping. Only flush if caching mode */
        if (cap_caching_mode(iommu->cap))
                iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
        else
                iommu_flush_write_buffer(iommu);

        return nelems;
}
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return !dma_addr;
}
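/*
 * The dma_map_ops installed as the global dma_ops once DMA remapping is
 * enabled; all PCI streaming and coherent DMA then funnels through the
 * routines above.
 */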
struct dma_map_ops intel_dma_ops = {
        .alloc_coherent = intel_alloc_coherent,
        .free_coherent = intel_free_coherent,
        .map_sg = intel_map_sg,
        .unmap_sg = intel_unmap_sg,
        .map_page = intel_map_page,
        .unmap_page = intel_unmap_page,
        .mapping_error = intel_mapping_error,
};
static inline int iommu_domain_cache_init(void)
{
        int ret = 0;

        iommu_domain_cache = kmem_cache_create("iommu_domain",
                                               sizeof(struct dmar_domain),
                                               0,
                                               SLAB_HWCACHE_ALIGN,
                                               NULL);
        if (!iommu_domain_cache) {
                printk(KERN_ERR "Couldn't create iommu_domain cache\n");
                ret = -ENOMEM;
        }

        return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
        int ret = 0;

        iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
                                                sizeof(struct device_domain_info),
                                                0,
                                                SLAB_HWCACHE_ALIGN,
                                                NULL);
        if (!iommu_devinfo_cache) {
                printk(KERN_ERR "Couldn't create devinfo cache\n");
                ret = -ENOMEM;
        }

        return ret;
}

static inline int iommu_iova_cache_init(void)
{
        int ret = 0;

        iommu_iova_cache = kmem_cache_create("iommu_iova",
                                             sizeof(struct iova),
                                             0,
                                             SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (!iommu_iova_cache) {
                printk(KERN_ERR "Couldn't create iova cache\n");
                ret = -ENOMEM;
        }

        return ret;
}
static int __init iommu_init_mempool(void)
{
        int ret;

        ret = iommu_iova_cache_init();
        if (ret)
                return ret;

        ret = iommu_domain_cache_init();
        if (ret)
                goto domain_error;

        ret = iommu_devinfo_cache_init();
        if (!ret)
                return ret;

        kmem_cache_destroy(iommu_domain_cache);
domain_error:
        kmem_cache_destroy(iommu_iova_cache);

        return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
        kmem_cache_destroy(iommu_devinfo_cache);
        kmem_cache_destroy(iommu_domain_cache);
        kmem_cache_destroy(iommu_iova_cache);
}
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
        struct dmar_drhd_unit *drhd;
        u32 vtbar;
        int rc;

        /* We know that this device on this chipset has its own IOMMU.
         * If we find it under a different IOMMU, then the BIOS is lying
         * to us. Hope that the IOMMU for this device is actually
         * disabled, and it needs no translation...
         */
        rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
        if (rc) {
                /* "can't" happen */
                dev_info(&pdev->dev, "failed to run vt-d quirk\n");
                return;
        }
        vtbar &= 0xffff0000;

        /* we know that the this iommu should be at offset 0xa000 from vtbar */
        drhd = dmar_find_matched_drhd_unit(pdev);
        if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
                            TAINT_FIRMWARE_WORKAROUND,
                            "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
                pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
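/*
 * Mark DMAR units that cover no PCI devices as ignored, and handle units
 * that cover nothing but graphics devices: either keep them (noting
 * intel_iommu_gfx_mapped) or bypass translation for their devices when
 * graphics mapping is disabled.
 */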
static void __init init_no_remapping_devices(void)
{
        struct dmar_drhd_unit *drhd;

        for_each_drhd_unit(drhd) {
                if (!drhd->include_all) {
                        int i;
                        for (i = 0; i < drhd->devices_cnt; i++)
                                if (drhd->devices[i] != NULL)
                                        break;
                        /* ignore DMAR unit if no pci devices exist */
                        if (i == drhd->devices_cnt)
                                drhd->ignored = 1;
                }
        }

        for_each_drhd_unit(drhd) {
                int i;
                if (drhd->ignored || drhd->include_all)
                        continue;

                for (i = 0; i < drhd->devices_cnt; i++)
                        if (drhd->devices[i] &&
                            !IS_GFX_DEVICE(drhd->devices[i]))
                                break;

                if (i < drhd->devices_cnt)
                        continue;

                /* This IOMMU has *only* gfx devices. Either bypass it or
                   set the gfx_mapped flag, as appropriate */
                if (dmar_map_gfx) {
                        intel_iommu_gfx_mapped = 1;
                } else {
                        drhd->ignored = 1;
                        for (i = 0; i < drhd->devices_cnt; i++) {
                                if (!drhd->devices[i])
                                        continue;
                                drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
                        }
                }
        }
}
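/*
 * Suspend/resume support: translation is disabled and the fault-event
 * registers of every active IOMMU are saved on suspend, then restored and
 * re-enabled via init_iommu_hw() on resume.
 */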
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        for_each_active_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        for_each_iommu(iommu, drhd) {
                if (drhd->ignored) {
                        /*
                         * we always have to disable PMRs or DMA may fail on
                         * this device
                         */
                        if (force_on)
                                iommu_disable_protect_mem_regions(iommu);
                        continue;
                }

                iommu_flush_write_buffer(iommu);

                iommu_set_root_entry(iommu);

                iommu->flush.flush_context(iommu, 0, 0, 0,
                                           DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                         DMA_TLB_GLOBAL_FLUSH);
                if (iommu_enable_translation(iommu))
                        return 1;
                iommu_disable_protect_mem_regions(iommu);
        }

        return 0;
}
static void iommu_flush_all(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        for_each_active_iommu(iommu, drhd) {
                iommu->flush.flush_context(iommu, 0, 0, 0,
                                           DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                         DMA_TLB_GLOBAL_FLUSH);
        }
}
static int iommu_suspend(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;
        unsigned long flag;

        for_each_active_iommu(iommu, drhd) {
                iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
                                             GFP_ATOMIC);
                if (!iommu->iommu_state)
                        goto nomem;
        }

        iommu_flush_all();

        for_each_active_iommu(iommu, drhd) {
                iommu_disable_translation(iommu);

                spin_lock_irqsave(&iommu->register_lock, flag);

                iommu->iommu_state[SR_DMAR_FECTL_REG] =
                        readl(iommu->reg + DMAR_FECTL_REG);
                iommu->iommu_state[SR_DMAR_FEDATA_REG] =
                        readl(iommu->reg + DMAR_FEDATA_REG);
                iommu->iommu_state[SR_DMAR_FEADDR_REG] =
                        readl(iommu->reg + DMAR_FEADDR_REG);
                iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
                        readl(iommu->reg + DMAR_FEUADDR_REG);

                spin_unlock_irqrestore(&iommu->register_lock, flag);
        }
        return 0;

nomem:
        for_each_active_iommu(iommu, drhd)
                kfree(iommu->iommu_state);

        return -ENOMEM;
}
static void iommu_resume(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;
        unsigned long flag;

        if (init_iommu_hw()) {
                if (force_on)
                        panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
                else
                        WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
                return;
        }

        for_each_active_iommu(iommu, drhd) {

                spin_lock_irqsave(&iommu->register_lock, flag);

                writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
                       iommu->reg + DMAR_FECTL_REG);
                writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
                       iommu->reg + DMAR_FEDATA_REG);
                writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
                       iommu->reg + DMAR_FEADDR_REG);
                writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
                       iommu->reg + DMAR_FEUADDR_REG);

                spin_unlock_irqrestore(&iommu->register_lock, flag);
        }

        for_each_active_iommu(iommu, drhd)
                kfree(iommu->iommu_state);
}
static struct syscore_ops iommu_syscore_ops = {
        .resume         = iommu_resume,
        .suspend        = iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
        register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif  /* CONFIG_PM */
/*
 * Here we only respond to action of unbound device from driver.
 *
 * Added device is not attached to its DMAR domain here yet. That will happen
 * when mapping the device to iova.
 */
static int device_notifier(struct notifier_block *nb,
                           unsigned long action, void *data)
{
        struct device *dev = data;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;

        if (iommu_no_mapping(dev))
                return 0;

        domain = find_domain(pdev);
        if (!domain)
                return 0;

        if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
                domain_remove_one_dev_info(domain, pdev);

                if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
                    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
                    list_empty(&domain->devices))
                        domain_exit(domain);
        }

        return 0;
}

static struct notifier_block device_nb = {
        .notifier_call = device_notifier,
};
int __init intel_iommu_init(void)
{
        int ret = 0;

        /* VT-d is required for a TXT/tboot launch, so enforce that */
        force_on = tboot_force_iommu();

        if (dmar_table_init()) {
                if (force_on)
                        panic("tboot: Failed to initialize DMAR table\n");
                return -ENODEV;
        }

        if (dmar_dev_scope_init()) {
                if (force_on)
                        panic("tboot: Failed to initialize DMAR device scope\n");
                return -ENODEV;
        }

        /*
         * Check the need for DMA-remapping initialization now.
         * Above initialization will also be used by Interrupt-remapping.
         */
        if (no_iommu || dmar_disabled)
                return -ENODEV;

        if (iommu_init_mempool()) {
                if (force_on)
                        panic("tboot: Failed to initialize iommu memory\n");
                return -ENODEV;
        }

        if (dmar_init_reserved_ranges()) {
                if (force_on)
                        panic("tboot: Failed to reserve iommu ranges\n");
                return -ENODEV;
        }

        init_no_remapping_devices();

        ret = init_dmars();
        if (ret) {
                if (force_on)
                        panic("tboot: Failed to initialize DMARs\n");
                printk(KERN_ERR "IOMMU: dmar init failed\n");
                put_iova_domain(&reserved_iova_list);
                iommu_exit_mempool();
                return ret;
        }
        printk(KERN_INFO
               "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

        init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
        swiotlb = 0;
#endif
        dma_ops = &intel_dma_ops;

        init_iommu_pm_ops();

        register_iommu(&intel_iommu_ops);

        bus_register_notifier(&pci_bus_type, &device_nb);

        return 0;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
                                           struct pci_dev *pdev)
{
        struct pci_dev *tmp, *parent;

        if (!iommu || !pdev)
                return;

        /* dependent device detach */
        tmp = pci_find_upstream_pcie_bridge(pdev);
        /* Secondary interface's bus number and devfn 0 */
        if (tmp) {
                parent = pdev->bus->self;
                while (parent != tmp) {
                        iommu_detach_dev(iommu, parent->bus->number,
                                         parent->devfn);
                        parent = parent->bus->self;
                }
                if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
                        iommu_detach_dev(iommu,
                                         tmp->subordinate->number, 0);
                else /* this is a legacy PCI bridge */
                        iommu_detach_dev(iommu, tmp->bus->number,
                                         tmp->devfn);
        }
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
                                       struct pci_dev *pdev)
{
        struct device_domain_info *info;
        struct intel_iommu *iommu;
        unsigned long flags;
        int found = 0;
        struct list_head *entry, *tmp;

        iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
                                pdev->devfn);
        if (!iommu)
                return;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_safe(entry, tmp, &domain->devices) {
                info = list_entry(entry, struct device_domain_info, link);
                if (info->segment == pci_domain_nr(pdev->bus) &&
                    info->bus == pdev->bus->number &&
                    info->devfn == pdev->devfn) {
                        list_del(&info->link);
                        list_del(&info->global);
                        if (info->dev)
                                info->dev->dev.archdata.iommu = NULL;
                        spin_unlock_irqrestore(&device_domain_lock, flags);

                        iommu_disable_dev_iotlb(info);
                        iommu_detach_dev(iommu, info->bus, info->devfn);
                        iommu_detach_dependent_devices(iommu, pdev);
                        free_devinfo_mem(info);

                        spin_lock_irqsave(&device_domain_lock, flags);

                        if (found)
                                break;
                        else
                                continue;
                }

                /* if there is no other devices under the same iommu
                 * owned by this domain, clear this iommu in iommu_bmp
                 * update iommu count and coherency
                 */
                if (iommu == device_to_iommu(info->segment, info->bus,
                                             info->devfn))
                        found = 1;
        }

        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (found == 0) {
                unsigned long tmp_flags;
                spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
                clear_bit(iommu->seq_id, &domain->iommu_bmp);
                domain->iommu_count--;
                domain_update_iommu_cap(domain);
                spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

                if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
                    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
                        spin_lock_irqsave(&iommu->lock, tmp_flags);
                        clear_bit(domain->id, iommu->domain_ids);
                        iommu->domains[domain->id] = NULL;
                        spin_unlock_irqrestore(&iommu->lock, tmp_flags);
                }
        }
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
        struct device_domain_info *info;
        struct intel_iommu *iommu;
        unsigned long flags1, flags2;

        spin_lock_irqsave(&device_domain_lock, flags1);
        while (!list_empty(&domain->devices)) {
                info = list_entry(domain->devices.next,
                                  struct device_domain_info, link);
                list_del(&info->link);
                list_del(&info->global);
                if (info->dev)
                        info->dev->dev.archdata.iommu = NULL;

                spin_unlock_irqrestore(&device_domain_lock, flags1);

                iommu_disable_dev_iotlb(info);
                iommu = device_to_iommu(info->segment, info->bus, info->devfn);
                iommu_detach_dev(iommu, info->bus, info->devfn);
                iommu_detach_dependent_devices(iommu, info->dev);

                /* clear this iommu in iommu_bmp, update iommu count
                 * and capabilities
                 */
                spin_lock_irqsave(&domain->iommu_lock, flags2);
                if (test_and_clear_bit(iommu->seq_id,
                                       &domain->iommu_bmp)) {
                        domain->iommu_count--;
                        domain_update_iommu_cap(domain);
                }
                spin_unlock_irqrestore(&domain->iommu_lock, flags2);

                free_devinfo_mem(info);
                spin_lock_irqsave(&device_domain_lock, flags1);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
                return NULL;

        domain->id = vm_domid++;
        domain->nid = -1;
        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

        return domain;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
        int adjust_width;

        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
        spin_lock_init(&domain->iommu_lock);

        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        domain->agaw = width_to_agaw(adjust_width);

        INIT_LIST_HEAD(&domain->devices);

        domain->iommu_count = 0;
        domain->iommu_coherency = 0;
        domain->iommu_snooping = 0;
        domain->iommu_superpage = 0;
        domain->max_addr = 0;
        domain->nid = -1;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
        return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
        unsigned long flags;
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        unsigned long i;
        unsigned long ndomains;

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                iommu = drhd->iommu;

                ndomains = cap_ndoms(iommu->cap);
                for_each_set_bit(i, iommu->domain_ids, ndomains) {
                        if (iommu->domains[i] == domain) {
                                spin_lock_irqsave(&iommu->lock, flags);
                                clear_bit(i, iommu->domain_ids);
                                iommu->domains[i] = NULL;
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                break;
                        }
                }
        }
}
static void vm_domain_exit(struct dmar_domain *domain)
{
        /* Domain 0 is reserved, so dont process it */
        if (!domain)
                return;

        vm_domain_remove_all_dev_info(domain);
        /* destroy iovas */
        put_iova_domain(&domain->iovad);

        /* clear ptes */
        dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        /* free page tables */
        dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        iommu_free_vm_domain(domain);
        free_domain_mem(domain);
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
        struct dmar_domain *dmar_domain;

        dmar_domain = iommu_alloc_vm_domain();
        if (!dmar_domain) {
                printk(KERN_ERR
                       "intel_iommu_domain_init: dmar_domain == NULL\n");
                return -ENOMEM;
        }
        if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                printk(KERN_ERR
                       "intel_iommu_domain_init() failed\n");
                vm_domain_exit(dmar_domain);
                return -ENOMEM;
        }
        domain_update_iommu_cap(dmar_domain);
        domain->priv = dmar_domain;

        return 0;
}
static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct dmar_domain *dmar_domain = domain->priv;

        domain->priv = NULL;
        vm_domain_exit(dmar_domain);
}
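/*
 * Attach a device to an IOMMU-API domain.  Any existing context mapping is
 * torn down first, the domain's address width is clamped to what this IOMMU
 * supports (dropping page-table levels if needed), and the device is then
 * added with multi-level translation.
 */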
static int intel_iommu_attach_device(struct iommu_domain *domain,
                                     struct device *dev)
{
        struct dmar_domain *dmar_domain = domain->priv;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct intel_iommu *iommu;
        int addr_width;

        /* normally pdev is not mapped */
        if (unlikely(domain_context_mapped(pdev))) {
                struct dmar_domain *old_domain;

                old_domain = find_domain(pdev);
                if (old_domain) {
                        if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
                            dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
                                domain_remove_one_dev_info(old_domain, pdev);
                        else
                                domain_remove_dev_info(old_domain);
                }
        }

        iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
                                pdev->devfn);
        if (!iommu)
                return -ENODEV;

        /* check if this iommu agaw is sufficient for max mapped address */
        addr_width = agaw_to_width(iommu->agaw);
        if (addr_width > cap_mgaw(iommu->cap))
                addr_width = cap_mgaw(iommu->cap);

        if (dmar_domain->max_addr > (1LL << addr_width)) {
                printk(KERN_ERR "%s: iommu width (%d) is not "
                       "sufficient for the mapped address (%llx)\n",
                       __func__, addr_width, dmar_domain->max_addr);
                return -EFAULT;
        }
        dmar_domain->gaw = addr_width;

        /*
         * Knock out extra levels of page tables if necessary
         */
        while (iommu->agaw < dmar_domain->agaw) {
                struct dma_pte *pte;

                pte = dmar_domain->pgd;
                if (dma_pte_present(pte)) {
                        dmar_domain->pgd = (struct dma_pte *)
                                phys_to_virt(dma_pte_addr(pte));
                        free_pgtable_page(pte);
                }
                dmar_domain->agaw--;
        }

        return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
                                      struct device *dev)
{
        struct dmar_domain *dmar_domain = domain->priv;
        struct pci_dev *pdev = to_pci_dev(dev);

        domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map(struct iommu_domain *domain,
                           unsigned long iova, phys_addr_t hpa,
                           int gfp_order, int iommu_prot)
{
        struct dmar_domain *dmar_domain = domain->priv;
        u64 max_addr;
        int prot = 0;
        size_t size;
        int ret;

        if (iommu_prot & IOMMU_READ)
                prot |= DMA_PTE_READ;
        if (iommu_prot & IOMMU_WRITE)
                prot |= DMA_PTE_WRITE;
        if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
                prot |= DMA_PTE_SNP;

        size = PAGE_SIZE << gfp_order;
        max_addr = iova + size;
        if (dmar_domain->max_addr < max_addr) {
                u64 end;

                /* check if minimum agaw is sufficient for mapped address */
                end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
                if (end < max_addr) {
                        printk(KERN_ERR "%s: iommu width (%d) is not "
                               "sufficient for the mapped address (%llx)\n",
                               __func__, dmar_domain->gaw, max_addr);
                        return -EFAULT;
                }
                dmar_domain->max_addr = max_addr;
        }
        /* Round up size to next multiple of PAGE_SIZE, if it and
           the low bits of hpa would take us onto the next page */
        size = aligned_nrpages(hpa, size);
        ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
                                 hpa >> VTD_PAGE_SHIFT, size, prot);
        return ret;
}
static int intel_iommu_unmap(struct iommu_domain *domain,
                             unsigned long iova, int gfp_order)
{
        struct dmar_domain *dmar_domain = domain->priv;
        size_t size = PAGE_SIZE << gfp_order;
        int order;

        order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                                    (iova + size - 1) >> VTD_PAGE_SHIFT);

        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;

        return order;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
                                            unsigned long iova)
{
        struct dmar_domain *dmar_domain = domain->priv;
        struct dma_pte *pte;
        u64 phys = 0;

        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
        if (pte)
                phys = dma_pte_addr(pte);

        return phys;
}
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
                                      unsigned long cap)
{
        struct dmar_domain *dmar_domain = domain->priv;

        if (cap == IOMMU_CAP_CACHE_COHERENCY)
                return dmar_domain->iommu_snooping;
        if (cap == IOMMU_CAP_INTR_REMAP)
                return intr_remapping_enabled;

        return 0;
}
static struct iommu_ops intel_iommu_ops = {
        .domain_init    = intel_iommu_domain_init,
        .domain_destroy = intel_iommu_domain_destroy,
        .attach_dev     = intel_iommu_attach_device,
        .detach_dev     = intel_iommu_detach_device,
        .map            = intel_iommu_map,
        .unmap          = intel_iommu_unmap,
        .iova_to_phys   = intel_iommu_iova_to_phys,
        .domain_has_cap = intel_iommu_domain_has_cap,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
        /*
         * Mobile 4 Series Chipset neglects to set RWBF capability,
         * but needs it:
         */
        printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
        rwbf_quirk = 1;

        /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
        if (dev->revision == 0x07) {
                printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
                dmar_map_gfx = 0;
        }
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
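/*
 * Bits of the integrated-graphics GGC config register describing how much
 * stolen memory the BIOS set aside for the GTT and whether any of it was
 * reserved for VT-d use; consumed by quirk_calpella_no_shadow_gtt() below.
 */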
#define GGC_MEMORY_SIZE_MASK    (0xf << 8)
#define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
#define GGC_MEMORY_SIZE_1M      (0x1 << 8)
#define GGC_MEMORY_SIZE_2M      (0x3 << 8)
#define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
        unsigned short ggc;

        if (pci_read_config_word(dev, GGC, &ggc))
                return;

        if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
                printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
                dmar_map_gfx = 0;
        } else if (dmar_map_gfx) {
                /* we have to ensure the gfx device is idle before we flush */
                printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
                intel_iommu_strict = 1;
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
        struct pci_dev *pdev;
        uint32_t vtisochctrl;

        /* If there's no Azalia in the system anyway, forget it. */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
        if (!pdev)
                return;
        pci_dev_put(pdev);

        /* System Management Registers. Might be hidden, in which case
           we can't do the sanity check. But that's OK, because the
           known-broken BIOSes _don't_ actually hide it, so far. */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
        if (!pdev)
                return;

        if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
                pci_dev_put(pdev);
                return;
        }

        pci_dev_put(pdev);

        /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
        if (vtisochctrl & 1)
                return;

        /* Drop all bits other than the number of TLB entries */
        vtisochctrl &= 0x1c;

        /* If we have the recommended number of TLB entries (16), fine. */
        if (vtisochctrl == 0x10)
                return;

        /* Zero TLB entries? You get to ride the short bus to school. */
        if (!vtisochctrl) {
                WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                iommu_identity_mapping |= IDENTMAP_AZALIA;
                return;
        }

        printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",