/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;
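
/*
 * Worked example (added for clarity, not in the original source): with the
 * usual 4KiB pages (PAGE_SHIFT == 12), IOVA_PFN() just drops the page
 * offset, so IOVA_PFN(0xfee00000) == 0xfee00 and DMA_32BIT_PFN == 0xfffff;
 * the IOAPIC window above therefore occupies PFNs 0xfee00-0xfeeff.
 */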
/* 12-63: Context Ptr (12 - (haw-1)) */
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}

static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}

static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}
#define CONTEXT_TT_MULTI_LEVEL	0

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
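
/*
 * Worked example (added for clarity, not in the original source): the
 * helpers above pack a context entry as two 64-bit words.  For a domain id
 * of 5, an address-width value of 2 and a page-table root at physical
 * address 0x12345000, calling context_set_domain_id(), then
 * context_set_address_width(), context_set_address_root(),
 * context_set_translation_type() with CONTEXT_TT_MULTI_LEVEL,
 * context_set_fault_enable() and context_set_present() on a zeroed entry
 * leaves hi == (5 << 8) | 2 and lo == 0x12345000 | 1.
 */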
/* 12-63: Host physical address */
static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	pte->val |= (addr & VTD_PAGE_MASK);
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}
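
/*
 * Worked example (added for clarity, not in the original source): for a
 * last-level PTE pointing at host page 0x5000, dma_set_pte_addr() stores
 * 0x5000 in the address bits and dma_set_pte_prot(pte, DMA_PTE_READ |
 * DMA_PTE_WRITE) sets the low two permission bits, after which
 * dma_pte_present() reports true because (pte->val & 3) is non-zero.
 */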
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices; 	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
	struct dmar_domain *domain; /* pointer to domain */
};
static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;
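
/*
 * Note (added for clarity, not in the original source): unmapped IOVAs are
 * parked in these per-iommu tables and released by a single batched IOTLB
 * flush, either when HIGH_WATER_MARK entries pile up or when unmap_timer
 * fires, trading a little delay in address reuse for far fewer hardware
 * invalidations (see add_unmap() and flush_unmaps() below).
 */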
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
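
/*
 * Usage note (added for clarity, not in the original source): the option
 * string is comma separated, so a kernel command line such as
 *	intel_iommu=on,strict,igfx_off
 * enables translation, disables batched IOTLB flushing and skips the
 * graphics device mapping in one go.
 */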
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}
static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static inline int width_to_agaw(int width);

/* calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default.
 */
395 int iommu_calculate_agaw(struct intel_iommu
*iommu
)
400 sagaw
= cap_sagaw(iommu
->cap
);
401 for (agaw
= width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH
);
403 if (test_bit(agaw
, &sagaw
))
410 /* in native case, each domain is related to only one iommu */
411 static struct intel_iommu
*domain_get_iommu(struct dmar_domain
*domain
)
415 BUG_ON(domain
->flags
& DOMAIN_FLAG_VIRTUAL_MACHINE
);
417 iommu_id
= find_first_bit(&domain
->iommu_bmp
, g_num_of_iommus
);
418 if (iommu_id
< 0 || iommu_id
>= g_num_of_iommus
)
421 return g_iommus
[iommu_id
];
424 /* "Coherency" capability may be different across iommus */
425 static void domain_update_iommu_coherency(struct dmar_domain
*domain
)
429 domain
->iommu_coherency
= 1;
431 i
= find_first_bit(&domain
->iommu_bmp
, g_num_of_iommus
);
432 for (; i
< g_num_of_iommus
; ) {
433 if (!ecap_coherent(g_iommus
[i
]->ecap
)) {
434 domain
->iommu_coherency
= 0;
437 i
= find_next_bit(&domain
->iommu_bmp
, g_num_of_iommus
, i
+1);
441 static struct intel_iommu
*device_to_iommu(u8 bus
, u8 devfn
)
443 struct dmar_drhd_unit
*drhd
= NULL
;
446 for_each_drhd_unit(drhd
) {
450 for (i
= 0; i
< drhd
->devices_cnt
; i
++) {
451 if (drhd
->devices
[i
] &&
452 drhd
->devices
[i
]->bus
->number
== bus
&&
453 drhd
->devices
[i
]->devfn
== devfn
)
455 if (drhd
->devices
[i
] &&
456 drhd
->devices
[i
]->subordinate
&&
457 drhd
->devices
[i
]->subordinate
->number
<= bus
&&
458 drhd
->devices
[i
]->subordinate
->subordinate
>= bus
)
462 if (drhd
->include_all
)
469 static void domain_flush_cache(struct dmar_domain
*domain
,
470 void *addr
, int size
)
472 if (!domain
->iommu_coherency
)
473 clflush_cache_range(addr
, size
);
476 /* Gets context entry for a given bus and devfn */
477 static struct context_entry
* device_to_context_entry(struct intel_iommu
*iommu
,
480 struct root_entry
*root
;
481 struct context_entry
*context
;
482 unsigned long phy_addr
;
485 spin_lock_irqsave(&iommu
->lock
, flags
);
486 root
= &iommu
->root_entry
[bus
];
487 context
= get_context_addr_from_root(root
);
489 context
= (struct context_entry
*)alloc_pgtable_page();
491 spin_unlock_irqrestore(&iommu
->lock
, flags
);
494 __iommu_flush_cache(iommu
, (void *)context
, CONTEXT_SIZE
);
495 phy_addr
= virt_to_phys((void *)context
);
496 set_root_value(root
, phy_addr
);
497 set_root_present(root
);
498 __iommu_flush_cache(iommu
, root
, sizeof(*root
));
500 spin_unlock_irqrestore(&iommu
->lock
, flags
);
501 return &context
[devfn
];
504 static int device_context_mapped(struct intel_iommu
*iommu
, u8 bus
, u8 devfn
)
506 struct root_entry
*root
;
507 struct context_entry
*context
;
511 spin_lock_irqsave(&iommu
->lock
, flags
);
512 root
= &iommu
->root_entry
[bus
];
513 context
= get_context_addr_from_root(root
);
518 ret
= context_present(&context
[devfn
]);
520 spin_unlock_irqrestore(&iommu
->lock
, flags
);
524 static void clear_context_table(struct intel_iommu
*iommu
, u8 bus
, u8 devfn
)
526 struct root_entry
*root
;
527 struct context_entry
*context
;
530 spin_lock_irqsave(&iommu
->lock
, flags
);
531 root
= &iommu
->root_entry
[bus
];
532 context
= get_context_addr_from_root(root
);
534 context_clear_entry(&context
[devfn
]);
535 __iommu_flush_cache(iommu
, &context
[devfn
], \
538 spin_unlock_irqrestore(&iommu
->lock
, flags
);
541 static void free_context_table(struct intel_iommu
*iommu
)
543 struct root_entry
*root
;
546 struct context_entry
*context
;
548 spin_lock_irqsave(&iommu
->lock
, flags
);
549 if (!iommu
->root_entry
) {
552 for (i
= 0; i
< ROOT_ENTRY_NR
; i
++) {
553 root
= &iommu
->root_entry
[i
];
554 context
= get_context_addr_from_root(root
);
556 free_pgtable_page(context
);
558 free_pgtable_page(iommu
->root_entry
);
559 iommu
->root_entry
= NULL
;
561 spin_unlock_irqrestore(&iommu
->lock
, flags
);
564 /* page table handling */
565 #define LEVEL_STRIDE (9)
566 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
568 static inline int agaw_to_level(int agaw
)
573 static inline int agaw_to_width(int agaw
)
575 return 30 + agaw
* LEVEL_STRIDE
;
579 static inline int width_to_agaw(int width
)
581 return (width
- 30) / LEVEL_STRIDE
;
584 static inline unsigned int level_to_offset_bits(int level
)
586 return (12 + (level
- 1) * LEVEL_STRIDE
);
589 static inline int address_level_offset(u64 addr
, int level
)
591 return ((addr
>> level_to_offset_bits(level
)) & LEVEL_MASK
);
594 static inline u64
level_mask(int level
)
596 return ((u64
)-1 << level_to_offset_bits(level
));
599 static inline u64
level_size(int level
)
601 return ((u64
)1 << level_to_offset_bits(level
));
604 static inline u64
align_to_level(u64 addr
, int level
)
606 return ((addr
+ level_size(level
) - 1) & level_mask(level
));
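
/*
 * Worked example (added for clarity, not in the original source): for the
 * default 48-bit domain width, width_to_agaw(48) == (48 - 30) / 9 == 2 and
 * agaw_to_width(2) == 48, i.e. four 9-bit levels on top of the 12-bit page
 * offset; level_to_offset_bits(1) == 12, so address_level_offset(addr, 1)
 * picks bits 12-20 of the address as the last-level index.
 */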
609 static struct dma_pte
* addr_to_dma_pte(struct dmar_domain
*domain
, u64 addr
)
611 int addr_width
= agaw_to_width(domain
->agaw
);
612 struct dma_pte
*parent
, *pte
= NULL
;
613 int level
= agaw_to_level(domain
->agaw
);
617 BUG_ON(!domain
->pgd
);
619 addr
&= (((u64
)1) << addr_width
) - 1;
620 parent
= domain
->pgd
;
622 spin_lock_irqsave(&domain
->mapping_lock
, flags
);
626 offset
= address_level_offset(addr
, level
);
627 pte
= &parent
[offset
];
631 if (!dma_pte_present(pte
)) {
632 tmp_page
= alloc_pgtable_page();
635 spin_unlock_irqrestore(&domain
->mapping_lock
,
639 domain_flush_cache(domain
, tmp_page
, PAGE_SIZE
);
640 dma_set_pte_addr(pte
, virt_to_phys(tmp_page
));
642 * high level table always sets r/w, last level page
643 * table control read/write
645 dma_set_pte_readable(pte
);
646 dma_set_pte_writable(pte
);
647 domain_flush_cache(domain
, pte
, sizeof(*pte
));
649 parent
= phys_to_virt(dma_pte_addr(pte
));
653 spin_unlock_irqrestore(&domain
->mapping_lock
, flags
);
657 /* return address's pte at specific level */
658 static struct dma_pte
*dma_addr_level_pte(struct dmar_domain
*domain
, u64 addr
,
661 struct dma_pte
*parent
, *pte
= NULL
;
662 int total
= agaw_to_level(domain
->agaw
);
665 parent
= domain
->pgd
;
666 while (level
<= total
) {
667 offset
= address_level_offset(addr
, total
);
668 pte
= &parent
[offset
];
672 if (!dma_pte_present(pte
))
674 parent
= phys_to_virt(dma_pte_addr(pte
));
680 /* clear one page's page table */
681 static void dma_pte_clear_one(struct dmar_domain
*domain
, u64 addr
)
683 struct dma_pte
*pte
= NULL
;
685 /* get last level pte */
686 pte
= dma_addr_level_pte(domain
, addr
, 1);
690 domain_flush_cache(domain
, pte
, sizeof(*pte
));
694 /* clear last level pte, a tlb flush should be followed */
695 static void dma_pte_clear_range(struct dmar_domain
*domain
, u64 start
, u64 end
)
697 int addr_width
= agaw_to_width(domain
->agaw
);
699 start
&= (((u64
)1) << addr_width
) - 1;
700 end
&= (((u64
)1) << addr_width
) - 1;
701 /* in case it's partial page */
702 start
= PAGE_ALIGN(start
);
705 /* we don't need lock here, nobody else touches the iova range */
706 while (start
< end
) {
707 dma_pte_clear_one(domain
, start
);
708 start
+= VTD_PAGE_SIZE
;
712 /* free page table pages. last level pte should already be cleared */
713 static void dma_pte_free_pagetable(struct dmar_domain
*domain
,
716 int addr_width
= agaw_to_width(domain
->agaw
);
718 int total
= agaw_to_level(domain
->agaw
);
722 start
&= (((u64
)1) << addr_width
) - 1;
723 end
&= (((u64
)1) << addr_width
) - 1;
725 /* we don't need lock here, nobody else touches the iova range */
727 while (level
<= total
) {
728 tmp
= align_to_level(start
, level
);
729 if (tmp
>= end
|| (tmp
+ level_size(level
) > end
))
733 pte
= dma_addr_level_pte(domain
, tmp
, level
);
736 phys_to_virt(dma_pte_addr(pte
)));
738 domain_flush_cache(domain
, pte
, sizeof(*pte
));
740 tmp
+= level_size(level
);
745 if (start
== 0 && end
>= ((((u64
)1) << addr_width
) - 1)) {
746 free_pgtable_page(domain
->pgd
);
752 static int iommu_alloc_root_entry(struct intel_iommu
*iommu
)
754 struct root_entry
*root
;
757 root
= (struct root_entry
*)alloc_pgtable_page();
761 __iommu_flush_cache(iommu
, root
, ROOT_SIZE
);
763 spin_lock_irqsave(&iommu
->lock
, flags
);
764 iommu
->root_entry
= root
;
765 spin_unlock_irqrestore(&iommu
->lock
, flags
);
770 static void iommu_set_root_entry(struct intel_iommu
*iommu
)
776 addr
= iommu
->root_entry
;
778 spin_lock_irqsave(&iommu
->register_lock
, flag
);
779 dmar_writeq(iommu
->reg
+ DMAR_RTADDR_REG
, virt_to_phys(addr
));
781 cmd
= iommu
->gcmd
| DMA_GCMD_SRTP
;
782 writel(cmd
, iommu
->reg
+ DMAR_GCMD_REG
);
784 /* Make sure hardware complete it */
785 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
786 readl
, (sts
& DMA_GSTS_RTPS
), sts
);
788 spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
791 static void iommu_flush_write_buffer(struct intel_iommu
*iommu
)
796 if (!rwbf_quirk
&& !cap_rwbf(iommu
->cap
))
798 val
= iommu
->gcmd
| DMA_GCMD_WBF
;
800 spin_lock_irqsave(&iommu
->register_lock
, flag
);
801 writel(val
, iommu
->reg
+ DMAR_GCMD_REG
);
803 /* Make sure hardware complete it */
804 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
805 readl
, (!(val
& DMA_GSTS_WBFS
)), val
);
807 spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
/* return value determines if we need a write buffer flush */
811 static int __iommu_flush_context(struct intel_iommu
*iommu
,
812 u16 did
, u16 source_id
, u8 function_mask
, u64 type
,
813 int non_present_entry_flush
)
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing, and if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * is used to cache any non-present entries)
824 if (non_present_entry_flush
) {
825 if (!cap_caching_mode(iommu
->cap
))
832 case DMA_CCMD_GLOBAL_INVL
:
833 val
= DMA_CCMD_GLOBAL_INVL
;
835 case DMA_CCMD_DOMAIN_INVL
:
836 val
= DMA_CCMD_DOMAIN_INVL
|DMA_CCMD_DID(did
);
838 case DMA_CCMD_DEVICE_INVL
:
839 val
= DMA_CCMD_DEVICE_INVL
|DMA_CCMD_DID(did
)
840 | DMA_CCMD_SID(source_id
) | DMA_CCMD_FM(function_mask
);
847 spin_lock_irqsave(&iommu
->register_lock
, flag
);
848 dmar_writeq(iommu
->reg
+ DMAR_CCMD_REG
, val
);
850 /* Make sure hardware complete it */
851 IOMMU_WAIT_OP(iommu
, DMAR_CCMD_REG
,
852 dmar_readq
, (!(val
& DMA_CCMD_ICC
)), val
);
854 spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
856 /* flush context entry will implicitly flush write buffer */
/* return value determines if we need a write buffer flush */
861 static int __iommu_flush_iotlb(struct intel_iommu
*iommu
, u16 did
,
862 u64 addr
, unsigned int size_order
, u64 type
,
863 int non_present_entry_flush
)
865 int tlb_offset
= ecap_iotlb_offset(iommu
->ecap
);
866 u64 val
= 0, val_iva
= 0;
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing, and if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * is used to cache any non-present entries)
875 if (non_present_entry_flush
) {
876 if (!cap_caching_mode(iommu
->cap
))
883 case DMA_TLB_GLOBAL_FLUSH
:
884 /* global flush doesn't need set IVA_REG */
885 val
= DMA_TLB_GLOBAL_FLUSH
|DMA_TLB_IVT
;
887 case DMA_TLB_DSI_FLUSH
:
888 val
= DMA_TLB_DSI_FLUSH
|DMA_TLB_IVT
|DMA_TLB_DID(did
);
890 case DMA_TLB_PSI_FLUSH
:
891 val
= DMA_TLB_PSI_FLUSH
|DMA_TLB_IVT
|DMA_TLB_DID(did
);
892 /* Note: always flush non-leaf currently */
893 val_iva
= size_order
| addr
;
898 /* Note: set drain read/write */
	 * This is probably just to be extra safe; it looks like we can
	 * ignore it without any impact.
904 if (cap_read_drain(iommu
->cap
))
905 val
|= DMA_TLB_READ_DRAIN
;
907 if (cap_write_drain(iommu
->cap
))
908 val
|= DMA_TLB_WRITE_DRAIN
;
910 spin_lock_irqsave(&iommu
->register_lock
, flag
);
911 /* Note: Only uses first TLB reg currently */
913 dmar_writeq(iommu
->reg
+ tlb_offset
, val_iva
);
914 dmar_writeq(iommu
->reg
+ tlb_offset
+ 8, val
);
916 /* Make sure hardware complete it */
917 IOMMU_WAIT_OP(iommu
, tlb_offset
+ 8,
918 dmar_readq
, (!(val
& DMA_TLB_IVT
)), val
);
920 spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
922 /* check IOTLB invalidation granularity */
923 if (DMA_TLB_IAIG(val
) == 0)
924 printk(KERN_ERR
"IOMMU: flush IOTLB failed\n");
925 if (DMA_TLB_IAIG(val
) != DMA_TLB_IIRG(type
))
926 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
927 (unsigned long long)DMA_TLB_IIRG(type
),
928 (unsigned long long)DMA_TLB_IAIG(val
));
929 /* flush iotlb entry will implicitly flush write buffer */
933 static int iommu_flush_iotlb_psi(struct intel_iommu
*iommu
, u16 did
,
934 u64 addr
, unsigned int pages
, int non_present_entry_flush
)
938 BUG_ON(addr
& (~VTD_PAGE_MASK
));
941 /* Fallback to domain selective flush if no PSI support */
942 if (!cap_pgsel_inv(iommu
->cap
))
943 return iommu
->flush
.flush_iotlb(iommu
, did
, 0, 0,
945 non_present_entry_flush
);
948 * PSI requires page size to be 2 ^ x, and the base address is naturally
949 * aligned to the size
951 mask
= ilog2(__roundup_pow_of_two(pages
));
952 /* Fallback to domain selective flush if size is too big */
953 if (mask
> cap_max_amask_val(iommu
->cap
))
954 return iommu
->flush
.flush_iotlb(iommu
, did
, 0, 0,
955 DMA_TLB_DSI_FLUSH
, non_present_entry_flush
);
957 return iommu
->flush
.flush_iotlb(iommu
, did
, addr
, mask
,
959 non_present_entry_flush
);
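
/*
 * Worked example (added for clarity, not in the original source): for a
 * request covering 9 pages, __roundup_pow_of_two(9) == 16 and ilog2(16)
 * == 4, so the PSI invalidation above covers 2^4 == 16 pages starting at
 * the naturally aligned base address.
 */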
962 static void iommu_disable_protect_mem_regions(struct intel_iommu
*iommu
)
967 spin_lock_irqsave(&iommu
->register_lock
, flags
);
968 pmen
= readl(iommu
->reg
+ DMAR_PMEN_REG
);
969 pmen
&= ~DMA_PMEN_EPM
;
970 writel(pmen
, iommu
->reg
+ DMAR_PMEN_REG
);
972 /* wait for the protected region status bit to clear */
973 IOMMU_WAIT_OP(iommu
, DMAR_PMEN_REG
,
974 readl
, !(pmen
& DMA_PMEN_PRS
), pmen
);
976 spin_unlock_irqrestore(&iommu
->register_lock
, flags
);
979 static int iommu_enable_translation(struct intel_iommu
*iommu
)
984 spin_lock_irqsave(&iommu
->register_lock
, flags
);
985 writel(iommu
->gcmd
|DMA_GCMD_TE
, iommu
->reg
+ DMAR_GCMD_REG
);
987 /* Make sure hardware complete it */
988 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
989 readl
, (sts
& DMA_GSTS_TES
), sts
);
991 iommu
->gcmd
|= DMA_GCMD_TE
;
992 spin_unlock_irqrestore(&iommu
->register_lock
, flags
);
996 static int iommu_disable_translation(struct intel_iommu
*iommu
)
1001 spin_lock_irqsave(&iommu
->register_lock
, flag
);
1002 iommu
->gcmd
&= ~DMA_GCMD_TE
;
1003 writel(iommu
->gcmd
, iommu
->reg
+ DMAR_GCMD_REG
);
1005 /* Make sure hardware complete it */
1006 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
1007 readl
, (!(sts
& DMA_GSTS_TES
)), sts
);
1009 spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
1014 static int iommu_init_domains(struct intel_iommu
*iommu
)
1016 unsigned long ndomains
;
1017 unsigned long nlongs
;
1019 ndomains
= cap_ndoms(iommu
->cap
);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
1021 nlongs
= BITS_TO_LONGS(ndomains
);
1023 /* TBD: there might be 64K domains,
1024 * consider other allocation for future chip
1026 iommu
->domain_ids
= kcalloc(nlongs
, sizeof(unsigned long), GFP_KERNEL
);
1027 if (!iommu
->domain_ids
) {
1028 printk(KERN_ERR
"Allocating domain id array failed\n");
1031 iommu
->domains
= kcalloc(ndomains
, sizeof(struct dmar_domain
*),
1033 if (!iommu
->domains
) {
1034 printk(KERN_ERR
"Allocating domain array failed\n");
1035 kfree(iommu
->domain_ids
);
1039 spin_lock_init(&iommu
->lock
);
1042 * if Caching mode is set, then invalid translations are tagged
1043 * with domainid 0. Hence we need to pre-allocate it.
1045 if (cap_caching_mode(iommu
->cap
))
1046 set_bit(0, iommu
->domain_ids
);
1051 static void domain_exit(struct dmar_domain
*domain
);
1052 static void vm_domain_exit(struct dmar_domain
*domain
);
1054 void free_dmar_iommu(struct intel_iommu
*iommu
)
1056 struct dmar_domain
*domain
;
1058 unsigned long flags
;
1060 i
= find_first_bit(iommu
->domain_ids
, cap_ndoms(iommu
->cap
));
1061 for (; i
< cap_ndoms(iommu
->cap
); ) {
1062 domain
= iommu
->domains
[i
];
1063 clear_bit(i
, iommu
->domain_ids
);
1065 spin_lock_irqsave(&domain
->iommu_lock
, flags
);
1066 if (--domain
->iommu_count
== 0) {
1067 if (domain
->flags
& DOMAIN_FLAG_VIRTUAL_MACHINE
)
1068 vm_domain_exit(domain
);
1070 domain_exit(domain
);
1072 spin_unlock_irqrestore(&domain
->iommu_lock
, flags
);
1074 i
= find_next_bit(iommu
->domain_ids
,
1075 cap_ndoms(iommu
->cap
), i
+1);
1078 if (iommu
->gcmd
& DMA_GCMD_TE
)
1079 iommu_disable_translation(iommu
);
1082 set_irq_data(iommu
->irq
, NULL
);
1083 /* This will mask the irq */
1084 free_irq(iommu
->irq
, iommu
);
1085 destroy_irq(iommu
->irq
);
1088 kfree(iommu
->domains
);
1089 kfree(iommu
->domain_ids
);
1091 g_iommus
[iommu
->seq_id
] = NULL
;
1093 /* if all iommus are freed, free g_iommus */
1094 for (i
= 0; i
< g_num_of_iommus
; i
++) {
1099 if (i
== g_num_of_iommus
)
1102 /* free context mapping */
1103 free_context_table(iommu
);
1106 static struct dmar_domain
* iommu_alloc_domain(struct intel_iommu
*iommu
)
1109 unsigned long ndomains
;
1110 struct dmar_domain
*domain
;
1111 unsigned long flags
;
1113 domain
= alloc_domain_mem();
1117 ndomains
= cap_ndoms(iommu
->cap
);
1119 spin_lock_irqsave(&iommu
->lock
, flags
);
1120 num
= find_first_zero_bit(iommu
->domain_ids
, ndomains
);
1121 if (num
>= ndomains
) {
1122 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1123 free_domain_mem(domain
);
1124 printk(KERN_ERR
"IOMMU: no free domain ids\n");
1128 set_bit(num
, iommu
->domain_ids
);
1130 memset(&domain
->iommu_bmp
, 0, sizeof(unsigned long));
1131 set_bit(iommu
->seq_id
, &domain
->iommu_bmp
);
1133 iommu
->domains
[num
] = domain
;
1134 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1139 static void iommu_free_domain(struct dmar_domain
*domain
)
1141 unsigned long flags
;
1142 struct intel_iommu
*iommu
;
1144 iommu
= domain_get_iommu(domain
);
1146 spin_lock_irqsave(&iommu
->lock
, flags
);
1147 clear_bit(domain
->id
, iommu
->domain_ids
);
1148 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1151 static struct iova_domain reserved_iova_list
;
1152 static struct lock_class_key reserved_alloc_key
;
1153 static struct lock_class_key reserved_rbtree_key
;
1155 static void dmar_init_reserved_ranges(void)
1157 struct pci_dev
*pdev
= NULL
;
1162 init_iova_domain(&reserved_iova_list
, DMA_32BIT_PFN
);
1164 lockdep_set_class(&reserved_iova_list
.iova_alloc_lock
,
1165 &reserved_alloc_key
);
1166 lockdep_set_class(&reserved_iova_list
.iova_rbtree_lock
,
1167 &reserved_rbtree_key
);
1169 /* IOAPIC ranges shouldn't be accessed by DMA */
1170 iova
= reserve_iova(&reserved_iova_list
, IOVA_PFN(IOAPIC_RANGE_START
),
1171 IOVA_PFN(IOAPIC_RANGE_END
));
1173 printk(KERN_ERR
"Reserve IOAPIC range failed\n");
1175 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1176 for_each_pci_dev(pdev
) {
1179 for (i
= 0; i
< PCI_NUM_RESOURCES
; i
++) {
1180 r
= &pdev
->resource
[i
];
1181 if (!r
->flags
|| !(r
->flags
& IORESOURCE_MEM
))
1185 size
= r
->end
- addr
;
1186 size
= PAGE_ALIGN(size
);
1187 iova
= reserve_iova(&reserved_iova_list
, IOVA_PFN(addr
),
1188 IOVA_PFN(size
+ addr
) - 1);
1190 printk(KERN_ERR
"Reserve iova failed\n");
1196 static void domain_reserve_special_ranges(struct dmar_domain
*domain
)
1198 copy_reserved_iova(&reserved_iova_list
, &domain
->iovad
);
1201 static inline int guestwidth_to_adjustwidth(int gaw
)
1204 int r
= (gaw
- 12) % 9;
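
/*
 * Note (an assumption added for clarity, not in the original source): the
 * remainder r is presumably used to round the guest width up to the next
 * width made of whole 9-bit levels above the 12-bit page offset (30, 39,
 * 48 or 57 bits), e.g. a 32-bit guest width would be adjusted to 39 while
 * 48 stays 48.
 */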
1215 static int domain_init(struct dmar_domain
*domain
, int guest_width
)
1217 struct intel_iommu
*iommu
;
1218 int adjust_width
, agaw
;
1219 unsigned long sagaw
;
1221 init_iova_domain(&domain
->iovad
, DMA_32BIT_PFN
);
1222 spin_lock_init(&domain
->mapping_lock
);
1223 spin_lock_init(&domain
->iommu_lock
);
1225 domain_reserve_special_ranges(domain
);
1227 /* calculate AGAW */
1228 iommu
= domain_get_iommu(domain
);
1229 if (guest_width
> cap_mgaw(iommu
->cap
))
1230 guest_width
= cap_mgaw(iommu
->cap
);
1231 domain
->gaw
= guest_width
;
1232 adjust_width
= guestwidth_to_adjustwidth(guest_width
);
1233 agaw
= width_to_agaw(adjust_width
);
1234 sagaw
= cap_sagaw(iommu
->cap
);
1235 if (!test_bit(agaw
, &sagaw
)) {
1236 /* hardware doesn't support it, choose a bigger one */
1237 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw
);
1238 agaw
= find_next_bit(&sagaw
, 5, agaw
);
1242 domain
->agaw
= agaw
;
1243 INIT_LIST_HEAD(&domain
->devices
);
1245 if (ecap_coherent(iommu
->ecap
))
1246 domain
->iommu_coherency
= 1;
1248 domain
->iommu_coherency
= 0;
1250 domain
->iommu_count
= 1;
1252 /* always allocate the top pgd */
1253 domain
->pgd
= (struct dma_pte
*)alloc_pgtable_page();
1256 __iommu_flush_cache(iommu
, domain
->pgd
, PAGE_SIZE
);
1260 static void domain_exit(struct dmar_domain
*domain
)
1264 /* Domain 0 is reserved, so dont process it */
1268 domain_remove_dev_info(domain
);
1270 put_iova_domain(&domain
->iovad
);
1271 end
= DOMAIN_MAX_ADDR(domain
->gaw
);
1272 end
= end
& (~PAGE_MASK
);
1275 dma_pte_clear_range(domain
, 0, end
);
1277 /* free page tables */
1278 dma_pte_free_pagetable(domain
, 0, end
);
1280 iommu_free_domain(domain
);
1281 free_domain_mem(domain
);
1284 static int domain_context_mapping_one(struct dmar_domain
*domain
,
1287 struct context_entry
*context
;
1288 unsigned long flags
;
1289 struct intel_iommu
*iommu
;
1290 struct dma_pte
*pgd
;
1292 unsigned long ndomains
;
1296 pr_debug("Set context mapping for %02x:%02x.%d\n",
1297 bus
, PCI_SLOT(devfn
), PCI_FUNC(devfn
));
1298 BUG_ON(!domain
->pgd
);
1300 iommu
= device_to_iommu(bus
, devfn
);
1304 context
= device_to_context_entry(iommu
, bus
, devfn
);
1307 spin_lock_irqsave(&iommu
->lock
, flags
);
1308 if (context_present(context
)) {
1309 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1316 if (domain
->flags
& DOMAIN_FLAG_VIRTUAL_MACHINE
) {
1319 /* find an available domain id for this device in iommu */
1320 ndomains
= cap_ndoms(iommu
->cap
);
1321 num
= find_first_bit(iommu
->domain_ids
, ndomains
);
1322 for (; num
< ndomains
; ) {
1323 if (iommu
->domains
[num
] == domain
) {
1328 num
= find_next_bit(iommu
->domain_ids
,
1329 cap_ndoms(iommu
->cap
), num
+1);
1333 num
= find_first_zero_bit(iommu
->domain_ids
, ndomains
);
1334 if (num
>= ndomains
) {
1335 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1336 printk(KERN_ERR
"IOMMU: no free domain ids\n");
1340 set_bit(num
, iommu
->domain_ids
);
1341 iommu
->domains
[num
] = domain
;
1345 /* Skip top levels of page tables for
1346 * iommu which has less agaw than default.
1348 for (agaw
= domain
->agaw
; agaw
!= iommu
->agaw
; agaw
--) {
1349 pgd
= phys_to_virt(dma_pte_addr(pgd
));
1350 if (!dma_pte_present(pgd
)) {
1351 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1357 context_set_domain_id(context
, id
);
1358 context_set_address_width(context
, iommu
->agaw
);
1359 context_set_address_root(context
, virt_to_phys(pgd
));
1360 context_set_translation_type(context
, CONTEXT_TT_MULTI_LEVEL
);
1361 context_set_fault_enable(context
);
1362 context_set_present(context
);
1363 domain_flush_cache(domain
, context
, sizeof(*context
));
1365 /* it's a non-present to present mapping */
1366 if (iommu
->flush
.flush_context(iommu
, domain
->id
,
1367 (((u16
)bus
) << 8) | devfn
, DMA_CCMD_MASK_NOBIT
,
1368 DMA_CCMD_DEVICE_INVL
, 1))
1369 iommu_flush_write_buffer(iommu
);
1371 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0, DMA_TLB_DSI_FLUSH
, 0);
1373 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1375 spin_lock_irqsave(&domain
->iommu_lock
, flags
);
1376 if (!test_and_set_bit(iommu
->seq_id
, &domain
->iommu_bmp
)) {
1377 domain
->iommu_count
++;
1378 domain_update_iommu_coherency(domain
);
1380 spin_unlock_irqrestore(&domain
->iommu_lock
, flags
);
1385 domain_context_mapping(struct dmar_domain
*domain
, struct pci_dev
*pdev
)
1388 struct pci_dev
*tmp
, *parent
;
1390 ret
= domain_context_mapping_one(domain
, pdev
->bus
->number
,
1395 /* dependent device mapping */
1396 tmp
= pci_find_upstream_pcie_bridge(pdev
);
1399 /* Secondary interface's bus number and devfn 0 */
1400 parent
= pdev
->bus
->self
;
1401 while (parent
!= tmp
) {
1402 ret
= domain_context_mapping_one(domain
, parent
->bus
->number
,
1406 parent
= parent
->bus
->self
;
1408 if (tmp
->is_pcie
) /* this is a PCIE-to-PCI bridge */
1409 return domain_context_mapping_one(domain
,
1410 tmp
->subordinate
->number
, 0);
1411 else /* this is a legacy PCI bridge */
1412 return domain_context_mapping_one(domain
,
1413 tmp
->bus
->number
, tmp
->devfn
);
1416 static int domain_context_mapped(struct pci_dev
*pdev
)
1419 struct pci_dev
*tmp
, *parent
;
1420 struct intel_iommu
*iommu
;
1422 iommu
= device_to_iommu(pdev
->bus
->number
, pdev
->devfn
);
1426 ret
= device_context_mapped(iommu
,
1427 pdev
->bus
->number
, pdev
->devfn
);
1430 /* dependent device mapping */
1431 tmp
= pci_find_upstream_pcie_bridge(pdev
);
1434 /* Secondary interface's bus number and devfn 0 */
1435 parent
= pdev
->bus
->self
;
1436 while (parent
!= tmp
) {
1437 ret
= device_context_mapped(iommu
, parent
->bus
->number
,
1441 parent
= parent
->bus
->self
;
1444 return device_context_mapped(iommu
,
1445 tmp
->subordinate
->number
, 0);
1447 return device_context_mapped(iommu
,
1448 tmp
->bus
->number
, tmp
->devfn
);
1452 domain_page_mapping(struct dmar_domain
*domain
, dma_addr_t iova
,
1453 u64 hpa
, size_t size
, int prot
)
1455 u64 start_pfn
, end_pfn
;
1456 struct dma_pte
*pte
;
1458 int addr_width
= agaw_to_width(domain
->agaw
);
1460 hpa
&= (((u64
)1) << addr_width
) - 1;
1462 if ((prot
& (DMA_PTE_READ
|DMA_PTE_WRITE
)) == 0)
1465 start_pfn
= ((u64
)hpa
) >> VTD_PAGE_SHIFT
;
1466 end_pfn
= (VTD_PAGE_ALIGN(((u64
)hpa
) + size
)) >> VTD_PAGE_SHIFT
;
1468 while (start_pfn
< end_pfn
) {
1469 pte
= addr_to_dma_pte(domain
, iova
+ VTD_PAGE_SIZE
* index
);
1472 /* We don't need lock here, nobody else
1473 * touches the iova range
1475 BUG_ON(dma_pte_addr(pte
));
1476 dma_set_pte_addr(pte
, start_pfn
<< VTD_PAGE_SHIFT
);
1477 dma_set_pte_prot(pte
, prot
);
1478 domain_flush_cache(domain
, pte
, sizeof(*pte
));
1485 static void iommu_detach_dev(struct intel_iommu
*iommu
, u8 bus
, u8 devfn
)
1490 clear_context_table(iommu
, bus
, devfn
);
1491 iommu
->flush
.flush_context(iommu
, 0, 0, 0,
1492 DMA_CCMD_GLOBAL_INVL
, 0);
1493 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0,
1494 DMA_TLB_GLOBAL_FLUSH
, 0);
1497 static void domain_remove_dev_info(struct dmar_domain
*domain
)
1499 struct device_domain_info
*info
;
1500 unsigned long flags
;
1501 struct intel_iommu
*iommu
;
1503 spin_lock_irqsave(&device_domain_lock
, flags
);
1504 while (!list_empty(&domain
->devices
)) {
1505 info
= list_entry(domain
->devices
.next
,
1506 struct device_domain_info
, link
);
1507 list_del(&info
->link
);
1508 list_del(&info
->global
);
1510 info
->dev
->dev
.archdata
.iommu
= NULL
;
1511 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1513 iommu
= device_to_iommu(info
->bus
, info
->devfn
);
1514 iommu_detach_dev(iommu
, info
->bus
, info
->devfn
);
1515 free_devinfo_mem(info
);
1517 spin_lock_irqsave(&device_domain_lock
, flags
);
1519 spin_unlock_irqrestore(&device_domain_lock
, flags
);
 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1526 static struct dmar_domain
*
1527 find_domain(struct pci_dev
*pdev
)
1529 struct device_domain_info
*info
;
1531 /* No lock here, assumes no domain exit in normal case */
1532 info
= pdev
->dev
.archdata
.iommu
;
1534 return info
->domain
;
1538 /* domain is initialized */
1539 static struct dmar_domain
*get_domain_for_dev(struct pci_dev
*pdev
, int gaw
)
1541 struct dmar_domain
*domain
, *found
= NULL
;
1542 struct intel_iommu
*iommu
;
1543 struct dmar_drhd_unit
*drhd
;
1544 struct device_domain_info
*info
, *tmp
;
1545 struct pci_dev
*dev_tmp
;
1546 unsigned long flags
;
1547 int bus
= 0, devfn
= 0;
1549 domain
= find_domain(pdev
);
1553 dev_tmp
= pci_find_upstream_pcie_bridge(pdev
);
1555 if (dev_tmp
->is_pcie
) {
1556 bus
= dev_tmp
->subordinate
->number
;
1559 bus
= dev_tmp
->bus
->number
;
1560 devfn
= dev_tmp
->devfn
;
1562 spin_lock_irqsave(&device_domain_lock
, flags
);
1563 list_for_each_entry(info
, &device_domain_list
, global
) {
1564 if (info
->bus
== bus
&& info
->devfn
== devfn
) {
1565 found
= info
->domain
;
1569 spin_unlock_irqrestore(&device_domain_lock
, flags
);
	/* pcie-pci bridge already has a domain, use it */
1577 /* Allocate new domain for the device */
1578 drhd
= dmar_find_matched_drhd_unit(pdev
);
1580 printk(KERN_ERR
"IOMMU: can't find DMAR for device %s\n",
1584 iommu
= drhd
->iommu
;
1586 domain
= iommu_alloc_domain(iommu
);
1590 if (domain_init(domain
, gaw
)) {
1591 domain_exit(domain
);
1595 /* register pcie-to-pci device */
1597 info
= alloc_devinfo_mem();
1599 domain_exit(domain
);
1603 info
->devfn
= devfn
;
1605 info
->domain
= domain
;
1606 /* This domain is shared by devices under p2p bridge */
1607 domain
->flags
|= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES
;
	/* pcie-to-pci bridge already has a domain, use it */
1611 spin_lock_irqsave(&device_domain_lock
, flags
);
1612 list_for_each_entry(tmp
, &device_domain_list
, global
) {
1613 if (tmp
->bus
== bus
&& tmp
->devfn
== devfn
) {
1614 found
= tmp
->domain
;
1619 free_devinfo_mem(info
);
1620 domain_exit(domain
);
1623 list_add(&info
->link
, &domain
->devices
);
1624 list_add(&info
->global
, &device_domain_list
);
1626 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1630 info
= alloc_devinfo_mem();
1633 info
->bus
= pdev
->bus
->number
;
1634 info
->devfn
= pdev
->devfn
;
1636 info
->domain
= domain
;
1637 spin_lock_irqsave(&device_domain_lock
, flags
);
1638 /* somebody is fast */
1639 found
= find_domain(pdev
);
1640 if (found
!= NULL
) {
1641 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1642 if (found
!= domain
) {
1643 domain_exit(domain
);
1646 free_devinfo_mem(info
);
1649 list_add(&info
->link
, &domain
->devices
);
1650 list_add(&info
->global
, &device_domain_list
);
1651 pdev
->dev
.archdata
.iommu
= info
;
1652 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1655 /* recheck it here, maybe others set it */
1656 return find_domain(pdev
);
1659 static int iommu_prepare_identity_map(struct pci_dev
*pdev
,
1660 unsigned long long start
,
1661 unsigned long long end
)
1663 struct dmar_domain
*domain
;
1665 unsigned long long base
;
1669 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1670 pci_name(pdev
), start
, end
);
1671 /* page table init */
1672 domain
= get_domain_for_dev(pdev
, DEFAULT_DOMAIN_ADDRESS_WIDTH
);
1676 /* The address might not be aligned */
1677 base
= start
& PAGE_MASK
;
1679 size
= PAGE_ALIGN(size
);
1680 if (!reserve_iova(&domain
->iovad
, IOVA_PFN(base
),
1681 IOVA_PFN(base
+ size
) - 1)) {
1682 printk(KERN_ERR
"IOMMU: reserve iova failed\n");
1687 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1688 size
, base
, pci_name(pdev
));
	 * RMRR range might overlap with the physical memory range,
1693 dma_pte_clear_range(domain
, base
, base
+ size
);
1695 ret
= domain_page_mapping(domain
, base
, base
, size
,
1696 DMA_PTE_READ
|DMA_PTE_WRITE
);
1700 /* context entry init */
1701 ret
= domain_context_mapping(domain
, pdev
);
1705 domain_exit(domain
);
1710 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit
*rmrr
,
1711 struct pci_dev
*pdev
)
1713 if (pdev
->dev
.archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
)
1715 return iommu_prepare_identity_map(pdev
, rmrr
->base_address
,
1716 rmrr
->end_address
+ 1);
1719 #ifdef CONFIG_DMAR_GFX_WA
1720 struct iommu_prepare_data
{
1721 struct pci_dev
*pdev
;
1725 static int __init
iommu_prepare_work_fn(unsigned long start_pfn
,
1726 unsigned long end_pfn
, void *datax
)
1728 struct iommu_prepare_data
*data
;
1730 data
= (struct iommu_prepare_data
*)datax
;
1732 data
->ret
= iommu_prepare_identity_map(data
->pdev
,
1733 start_pfn
<<PAGE_SHIFT
, end_pfn
<<PAGE_SHIFT
);
1738 static int __init
iommu_prepare_with_active_regions(struct pci_dev
*pdev
)
1741 struct iommu_prepare_data data
;
1746 for_each_online_node(nid
) {
1747 work_with_active_regions(nid
, iommu_prepare_work_fn
, &data
);
1754 static void __init
iommu_prepare_gfx_mapping(void)
1756 struct pci_dev
*pdev
= NULL
;
1759 for_each_pci_dev(pdev
) {
1760 if (pdev
->dev
.archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
||
1761 !IS_GFX_DEVICE(pdev
))
1763 printk(KERN_INFO
"IOMMU: gfx device %s 1-1 mapping\n",
1765 ret
= iommu_prepare_with_active_regions(pdev
);
1767 printk(KERN_ERR
"IOMMU: mapping reserved region failed\n");
1770 #else /* !CONFIG_DMAR_GFX_WA */
1771 static inline void iommu_prepare_gfx_mapping(void)
1777 #ifdef CONFIG_DMAR_FLOPPY_WA
1778 static inline void iommu_prepare_isa(void)
1780 struct pci_dev
*pdev
;
1783 pdev
= pci_get_class(PCI_CLASS_BRIDGE_ISA
<< 8, NULL
);
1787 printk(KERN_INFO
"IOMMU: Prepare 0-16M unity mapping for LPC\n");
1788 ret
= iommu_prepare_identity_map(pdev
, 0, 16*1024*1024);
		printk("IOMMU: Failed to create 0-16M identity map, "
			"floppy might not work\n");
1796 static inline void iommu_prepare_isa(void)
#endif /* !CONFIG_DMAR_FLOPPY_WA */
1802 static int __init
init_dmars(void)
1804 struct dmar_drhd_unit
*drhd
;
1805 struct dmar_rmrr_unit
*rmrr
;
1806 struct pci_dev
*pdev
;
1807 struct intel_iommu
*iommu
;
1813 * initialize and program root entry to not present
1816 for_each_drhd_unit(drhd
) {
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read-only
1825 g_iommus
= kcalloc(g_num_of_iommus
, sizeof(struct intel_iommu
*),
1828 printk(KERN_ERR
"Allocating global iommu array failed\n");
1833 deferred_flush
= kzalloc(g_num_of_iommus
*
1834 sizeof(struct deferred_flush_tables
), GFP_KERNEL
);
1835 if (!deferred_flush
) {
1841 for_each_drhd_unit(drhd
) {
1845 iommu
= drhd
->iommu
;
1846 g_iommus
[iommu
->seq_id
] = iommu
;
1848 ret
= iommu_init_domains(iommu
);
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
1857 ret
= iommu_alloc_root_entry(iommu
);
1859 printk(KERN_ERR
"IOMMU: allocate root entry failed\n");
	 * Start from a sane iommu hardware state.
1867 for_each_drhd_unit(drhd
) {
1871 iommu
= drhd
->iommu
;
1874 * If the queued invalidation is already initialized by us
1875 * (for example, while enabling interrupt-remapping) then
1876 * we got the things already rolling from a sane state.
1882 * Clear any previous faults.
1884 dmar_fault(-1, iommu
);
1886 * Disable queued invalidation if supported and already enabled
1887 * before OS handover.
1889 dmar_disable_qi(iommu
);
1892 for_each_drhd_unit(drhd
) {
1896 iommu
= drhd
->iommu
;
1898 if (dmar_enable_qi(iommu
)) {
1900 * Queued Invalidate not enabled, use Register Based
1903 iommu
->flush
.flush_context
= __iommu_flush_context
;
1904 iommu
->flush
.flush_iotlb
= __iommu_flush_iotlb
;
1905 printk(KERN_INFO
"IOMMU 0x%Lx: using Register based "
1907 (unsigned long long)drhd
->reg_base_addr
);
1909 iommu
->flush
.flush_context
= qi_flush_context
;
1910 iommu
->flush
.flush_iotlb
= qi_flush_iotlb
;
1911 printk(KERN_INFO
"IOMMU 0x%Lx: using Queued "
1913 (unsigned long long)drhd
->reg_base_addr
);
1919 * for each dev attached to rmrr
1921 * locate drhd for dev, alloc domain for dev
1922 * allocate free domain
1923 * allocate page table entries for rmrr
1924 * if context not allocated for bus
1925 * allocate and init context
1926 * set present in root table for this bus
1927 * init context with domain, translation etc
1931 for_each_rmrr_units(rmrr
) {
1932 for (i
= 0; i
< rmrr
->devices_cnt
; i
++) {
1933 pdev
= rmrr
->devices
[i
];
			/* some BIOS lists non-existent devices in DMAR table */
1937 ret
= iommu_prepare_rmrr_dev(rmrr
, pdev
);
1940 "IOMMU: mapping reserved region failed\n");
1944 iommu_prepare_gfx_mapping();
1946 iommu_prepare_isa();
1951 * global invalidate context cache
1952 * global invalidate iotlb
1953 * enable translation
1955 for_each_drhd_unit(drhd
) {
1958 iommu
= drhd
->iommu
;
1960 iommu_flush_write_buffer(iommu
);
1962 ret
= dmar_set_interrupt(iommu
);
1966 iommu_set_root_entry(iommu
);
1968 iommu
->flush
.flush_context(iommu
, 0, 0, 0, DMA_CCMD_GLOBAL_INVL
,
1970 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH
,
1972 iommu_disable_protect_mem_regions(iommu
);
1974 ret
= iommu_enable_translation(iommu
);
1981 for_each_drhd_unit(drhd
) {
1984 iommu
= drhd
->iommu
;
1991 static inline u64
aligned_size(u64 host_addr
, size_t size
)
1994 addr
= (host_addr
& (~PAGE_MASK
)) + size
;
1995 return PAGE_ALIGN(addr
);
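
/*
 * Worked example (added for clarity, not in the original source):
 * aligned_size() counts the whole pages a buffer touches.  A 0x100-byte
 * buffer at host address 0x1ff80 straddles a page boundary:
 * (0x1ff80 & ~PAGE_MASK) + 0x100 == 0xf80 + 0x100 == 0x1080, and
 * PAGE_ALIGN() rounds that up to 0x2000, i.e. two pages.
 */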
1999 iommu_alloc_iova(struct dmar_domain
*domain
, size_t size
, u64 end
)
2003 /* Make sure it's in range */
2004 end
= min_t(u64
, DOMAIN_MAX_ADDR(domain
->gaw
), end
);
2005 if (!size
|| (IOVA_START_ADDR
+ size
> end
))
2008 piova
= alloc_iova(&domain
->iovad
,
2009 size
>> PAGE_SHIFT
, IOVA_PFN(end
), 1);
2013 static struct iova
*
2014 __intel_alloc_iova(struct device
*dev
, struct dmar_domain
*domain
,
2015 size_t size
, u64 dma_mask
)
2017 struct pci_dev
*pdev
= to_pci_dev(dev
);
2018 struct iova
*iova
= NULL
;
2020 if (dma_mask
<= DMA_32BIT_MASK
|| dmar_forcedac
)
2021 iova
= iommu_alloc_iova(domain
, size
, dma_mask
);
2024 * First try to allocate an io virtual address in
2025 * DMA_32BIT_MASK and if that fails then try allocating
2028 iova
= iommu_alloc_iova(domain
, size
, DMA_32BIT_MASK
);
2030 iova
= iommu_alloc_iova(domain
, size
, dma_mask
);
2034 printk(KERN_ERR
"Allocating iova for %s failed", pci_name(pdev
));
2041 static struct dmar_domain
*
2042 get_valid_domain_for_dev(struct pci_dev
*pdev
)
2044 struct dmar_domain
*domain
;
2047 domain
= get_domain_for_dev(pdev
,
2048 DEFAULT_DOMAIN_ADDRESS_WIDTH
);
2051 "Allocating domain for %s failed", pci_name(pdev
));
2055 /* make sure context mapping is ok */
2056 if (unlikely(!domain_context_mapped(pdev
))) {
2057 ret
= domain_context_mapping(domain
, pdev
);
2060 "Domain context map for %s failed",
2069 static dma_addr_t
__intel_map_single(struct device
*hwdev
, phys_addr_t paddr
,
2070 size_t size
, int dir
, u64 dma_mask
)
2072 struct pci_dev
*pdev
= to_pci_dev(hwdev
);
2073 struct dmar_domain
*domain
;
2074 phys_addr_t start_paddr
;
2078 struct intel_iommu
*iommu
;
2080 BUG_ON(dir
== DMA_NONE
);
2081 if (pdev
->dev
.archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
)
2084 domain
= get_valid_domain_for_dev(pdev
);
2088 iommu
= domain_get_iommu(domain
);
2089 size
= aligned_size((u64
)paddr
, size
);
2091 iova
= __intel_alloc_iova(hwdev
, domain
, size
, pdev
->dma_mask
);
2095 start_paddr
= (phys_addr_t
)iova
->pfn_lo
<< PAGE_SHIFT
;
2098 * Check if DMAR supports zero-length reads on write only
2101 if (dir
== DMA_TO_DEVICE
|| dir
== DMA_BIDIRECTIONAL
|| \
2102 !cap_zlr(iommu
->cap
))
2103 prot
|= DMA_PTE_READ
;
2104 if (dir
== DMA_FROM_DEVICE
|| dir
== DMA_BIDIRECTIONAL
)
2105 prot
|= DMA_PTE_WRITE
;
	 * paddr ~ paddr + size might span a partial page; we should map the
	 * whole page.  Note: if two parts of one page are separately mapped,
	 * we might have two guest_addr mappings to the same host paddr, but
	 * this is not a big problem
2112 ret
= domain_page_mapping(domain
, start_paddr
,
2113 ((u64
)paddr
) & PAGE_MASK
, size
, prot
);
2117 /* it's a non-present to present mapping */
2118 ret
= iommu_flush_iotlb_psi(iommu
, domain
->id
,
2119 start_paddr
, size
>> VTD_PAGE_SHIFT
, 1);
2121 iommu_flush_write_buffer(iommu
);
2123 return start_paddr
+ ((u64
)paddr
& (~PAGE_MASK
));
2127 __free_iova(&domain
->iovad
, iova
);
2128 printk(KERN_ERR
"Device %s request: %lx@%llx dir %d --- failed\n",
2129 pci_name(pdev
), size
, (unsigned long long)paddr
, dir
);
2133 static dma_addr_t
intel_map_page(struct device
*dev
, struct page
*page
,
2134 unsigned long offset
, size_t size
,
2135 enum dma_data_direction dir
,
2136 struct dma_attrs
*attrs
)
2138 return __intel_map_single(dev
, page_to_phys(page
) + offset
, size
,
2139 dir
, to_pci_dev(dev
)->dma_mask
);
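
/*
 * Note (added for clarity, not in the original source): this is the entry
 * point a driver reaches through dma_map_page(); the physical address is
 * page_to_phys(page) + offset, and __intel_map_single() carves an IOVA out
 * of the device's domain, fills the page tables and returns the bus
 * address the device should use for DMA.
 */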
2142 static void flush_unmaps(void)
2148 /* just flush them all */
2149 for (i
= 0; i
< g_num_of_iommus
; i
++) {
2150 struct intel_iommu
*iommu
= g_iommus
[i
];
2154 if (deferred_flush
[i
].next
) {
2155 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0,
2156 DMA_TLB_GLOBAL_FLUSH
, 0);
2157 for (j
= 0; j
< deferred_flush
[i
].next
; j
++) {
2158 __free_iova(&deferred_flush
[i
].domain
[j
]->iovad
,
2159 deferred_flush
[i
].iova
[j
]);
2161 deferred_flush
[i
].next
= 0;
2168 static void flush_unmaps_timeout(unsigned long data
)
2170 unsigned long flags
;
2172 spin_lock_irqsave(&async_umap_flush_lock
, flags
);
2174 spin_unlock_irqrestore(&async_umap_flush_lock
, flags
);
2177 static void add_unmap(struct dmar_domain
*dom
, struct iova
*iova
)
2179 unsigned long flags
;
2181 struct intel_iommu
*iommu
;
2183 spin_lock_irqsave(&async_umap_flush_lock
, flags
);
2184 if (list_size
== HIGH_WATER_MARK
)
2187 iommu
= domain_get_iommu(dom
);
2188 iommu_id
= iommu
->seq_id
;
2190 next
= deferred_flush
[iommu_id
].next
;
2191 deferred_flush
[iommu_id
].domain
[next
] = dom
;
2192 deferred_flush
[iommu_id
].iova
[next
] = iova
;
2193 deferred_flush
[iommu_id
].next
++;
2196 mod_timer(&unmap_timer
, jiffies
+ msecs_to_jiffies(10));
2200 spin_unlock_irqrestore(&async_umap_flush_lock
, flags
);
2203 static void intel_unmap_page(struct device
*dev
, dma_addr_t dev_addr
,
2204 size_t size
, enum dma_data_direction dir
,
2205 struct dma_attrs
*attrs
)
2207 struct pci_dev
*pdev
= to_pci_dev(dev
);
2208 struct dmar_domain
*domain
;
2209 unsigned long start_addr
;
2211 struct intel_iommu
*iommu
;
2213 if (pdev
->dev
.archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
)
2215 domain
= find_domain(pdev
);
2218 iommu
= domain_get_iommu(domain
);
2220 iova
= find_iova(&domain
->iovad
, IOVA_PFN(dev_addr
));
2224 start_addr
= iova
->pfn_lo
<< PAGE_SHIFT
;
2225 size
= aligned_size((u64
)dev_addr
, size
);
2227 pr_debug("Device %s unmapping: %lx@%llx\n",
2228 pci_name(pdev
), size
, (unsigned long long)start_addr
);
2230 /* clear the whole page */
2231 dma_pte_clear_range(domain
, start_addr
, start_addr
+ size
);
2232 /* free page tables */
2233 dma_pte_free_pagetable(domain
, start_addr
, start_addr
+ size
);
2234 if (intel_iommu_strict
) {
2235 if (iommu_flush_iotlb_psi(iommu
,
2236 domain
->id
, start_addr
, size
>> VTD_PAGE_SHIFT
, 0))
2237 iommu_flush_write_buffer(iommu
);
2239 __free_iova(&domain
->iovad
, iova
);
2241 add_unmap(domain
, iova
);
2243 * queue up the release of the unmap to save the 1/6th of the
2244 * cpu used up by the iotlb flush operation...
2249 static void intel_unmap_single(struct device
*dev
, dma_addr_t dev_addr
, size_t size
,
2252 intel_unmap_page(dev
, dev_addr
, size
, dir
, NULL
);
2255 static void *intel_alloc_coherent(struct device
*hwdev
, size_t size
,
2256 dma_addr_t
*dma_handle
, gfp_t flags
)
2261 size
= PAGE_ALIGN(size
);
2262 order
= get_order(size
);
2263 flags
&= ~(GFP_DMA
| GFP_DMA32
);
2265 vaddr
= (void *)__get_free_pages(flags
, order
);
2268 memset(vaddr
, 0, size
);
2270 *dma_handle
= __intel_map_single(hwdev
, virt_to_bus(vaddr
), size
,
2272 hwdev
->coherent_dma_mask
);
2275 free_pages((unsigned long)vaddr
, order
);
2279 static void intel_free_coherent(struct device
*hwdev
, size_t size
, void *vaddr
,
2280 dma_addr_t dma_handle
)
2284 size
= PAGE_ALIGN(size
);
2285 order
= get_order(size
);
2287 intel_unmap_single(hwdev
, dma_handle
, size
, DMA_BIDIRECTIONAL
);
2288 free_pages((unsigned long)vaddr
, order
);
2291 #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
2293 static void intel_unmap_sg(struct device
*hwdev
, struct scatterlist
*sglist
,
2294 int nelems
, enum dma_data_direction dir
,
2295 struct dma_attrs
*attrs
)
2298 struct pci_dev
*pdev
= to_pci_dev(hwdev
);
2299 struct dmar_domain
*domain
;
2300 unsigned long start_addr
;
2304 struct scatterlist
*sg
;
2305 struct intel_iommu
*iommu
;
2307 if (pdev
->dev
.archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
)
2310 domain
= find_domain(pdev
);
2313 iommu
= domain_get_iommu(domain
);
2315 iova
= find_iova(&domain
->iovad
, IOVA_PFN(sglist
[0].dma_address
));
2318 for_each_sg(sglist
, sg
, nelems
, i
) {
2319 addr
= SG_ENT_VIRT_ADDRESS(sg
);
2320 size
+= aligned_size((u64
)addr
, sg
->length
);
2323 start_addr
= iova
->pfn_lo
<< PAGE_SHIFT
;
2325 /* clear the whole page */
2326 dma_pte_clear_range(domain
, start_addr
, start_addr
+ size
);
2327 /* free page tables */
2328 dma_pte_free_pagetable(domain
, start_addr
, start_addr
+ size
);
2330 if (iommu_flush_iotlb_psi(iommu
, domain
->id
, start_addr
,
2331 size
>> VTD_PAGE_SHIFT
, 0))
2332 iommu_flush_write_buffer(iommu
);
2335 __free_iova(&domain
->iovad
, iova
);
2338 static int intel_nontranslate_map_sg(struct device
*hddev
,
2339 struct scatterlist
*sglist
, int nelems
, int dir
)
2342 struct scatterlist
*sg
;
2344 for_each_sg(sglist
, sg
, nelems
, i
) {
2345 BUG_ON(!sg_page(sg
));
2346 sg
->dma_address
= virt_to_bus(SG_ENT_VIRT_ADDRESS(sg
));
2347 sg
->dma_length
= sg
->length
;
2352 static int intel_map_sg(struct device
*hwdev
, struct scatterlist
*sglist
, int nelems
,
2353 enum dma_data_direction dir
, struct dma_attrs
*attrs
)
2357 struct pci_dev
*pdev
= to_pci_dev(hwdev
);
2358 struct dmar_domain
*domain
;
2362 struct iova
*iova
= NULL
;
2364 struct scatterlist
*sg
;
2365 unsigned long start_addr
;
2366 struct intel_iommu
*iommu
;
2368 BUG_ON(dir
== DMA_NONE
);
2369 if (pdev
->dev
.archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
)
2370 return intel_nontranslate_map_sg(hwdev
, sglist
, nelems
, dir
);
2372 domain
= get_valid_domain_for_dev(pdev
);
2376 iommu
= domain_get_iommu(domain
);
2378 for_each_sg(sglist
, sg
, nelems
, i
) {
2379 addr
= SG_ENT_VIRT_ADDRESS(sg
);
2380 addr
= (void *)virt_to_phys(addr
);
2381 size
+= aligned_size((u64
)addr
, sg
->length
);
2384 iova
= __intel_alloc_iova(hwdev
, domain
, size
, pdev
->dma_mask
);
2386 sglist
->dma_length
= 0;
2391 * Check if DMAR supports zero-length reads on write only
2394 if (dir
== DMA_TO_DEVICE
|| dir
== DMA_BIDIRECTIONAL
|| \
2395 !cap_zlr(iommu
->cap
))
2396 prot
|= DMA_PTE_READ
;
2397 if (dir
== DMA_FROM_DEVICE
|| dir
== DMA_BIDIRECTIONAL
)
2398 prot
|= DMA_PTE_WRITE
;
2400 start_addr
= iova
->pfn_lo
<< PAGE_SHIFT
;
2402 for_each_sg(sglist
, sg
, nelems
, i
) {
2403 addr
= SG_ENT_VIRT_ADDRESS(sg
);
2404 addr
= (void *)virt_to_phys(addr
);
2405 size
= aligned_size((u64
)addr
, sg
->length
);
2406 ret
= domain_page_mapping(domain
, start_addr
+ offset
,
2407 ((u64
)addr
) & PAGE_MASK
,
2410 /* clear the page */
2411 dma_pte_clear_range(domain
, start_addr
,
2412 start_addr
+ offset
);
2413 /* free page tables */
2414 dma_pte_free_pagetable(domain
, start_addr
,
2415 start_addr
+ offset
);
2417 __free_iova(&domain
->iovad
, iova
);
2420 sg
->dma_address
= start_addr
+ offset
+
2421 ((u64
)addr
& (~PAGE_MASK
));
2422 sg
->dma_length
= sg
->length
;
2426 /* it's a non-present to present mapping */
2427 if (iommu_flush_iotlb_psi(iommu
, domain
->id
,
2428 start_addr
, offset
>> VTD_PAGE_SHIFT
, 1))
2429 iommu_flush_write_buffer(iommu
);
2433 static int intel_mapping_error(struct device
*dev
, dma_addr_t dma_addr
)
2438 struct dma_map_ops intel_dma_ops
= {
2439 .alloc_coherent
= intel_alloc_coherent
,
2440 .free_coherent
= intel_free_coherent
,
2441 .map_sg
= intel_map_sg
,
2442 .unmap_sg
= intel_unmap_sg
,
2443 .map_page
= intel_map_page
,
2444 .unmap_page
= intel_unmap_page
,
2445 .mapping_error
= intel_mapping_error
,
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
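/*
 * The slab caches created above back the driver's small-object allocators
 * (alloc_domain_mem(), alloc_devinfo_mem(), and the iova entries).  As a
 * rough, generic sketch of the kmem_cache pattern involved (not the exact
 * wrapper bodies defined earlier in this file):
 *
 *	struct dmar_domain *d;
 *
 *	d = kmem_cache_zalloc(iommu_domain_cache, GFP_ATOMIC);
 *	if (!d)
 *		return NULL;
 *	...
 *	kmem_cache_free(iommu_domain_cache, d);
 */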
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || swiotlb || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;
	dma_ops = &intel_dma_ops;

	register_iommu(&intel_iommu_ops);

	return 0;
}
static int vm_domain_add_dev_info(struct dmar_domain *domain,
				  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}
static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
					  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_detach_dev(iommu, info->bus, info->devfn);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there is no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * update iommu count and coherency
		 */
		if (device_to_iommu(info->bus, info->devfn) == iommu)
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_coherency(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu = device_to_iommu(info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and coherency
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_coherency(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}
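/*
 * The scan above is the open-coded form of walking the set bits of
 * domain->iommu_bmp with find_first_bit()/find_next_bit().  If the
 * for_each_bit() helper is available in this tree (an assumption, not
 * verified here), the same walk can be written more compactly:
 *
 *	for_each_bit(i, &domain->iommu_bmp, g_num_of_iommus)
 *		if (min_agaw > g_iommus[i]->agaw)
 *			min_agaw = g_iommus[i]->agaw;
 */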
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}
static int vm_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}
static void vm_domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~VTD_PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}
static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_context_mapping(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = vm_domain_add_dev_info(dmar_domain, pdev);
	return ret;
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	vm_domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;

	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	dma_addr_t base;

	/* The address might not be aligned */
	base = iova & VTD_PAGE_MASK;
	size = VTD_PAGE_ALIGN(size);
	dma_pte_clear_range(dmar_domain, base, base + size);

	if (dmar_domain->max_addr == base + size)
		dmar_domain->max_addr = base;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
};
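/*
 * These callbacks are reached through the generic IOMMU API in
 * include/linux/iommu.h, e.g. by device-assignment code.  A minimal,
 * hypothetical consumer (the wrapper names below are assumed to match this
 * kernel's iommu.h; the device pointer, iova, hpa and size are
 * placeholders) would look roughly like:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	if (!dom)
 *		return -ENOMEM;
 *	if (iommu_attach_device(dom, &pdev->dev))
 *		goto out_free;
 *	if (iommu_map_range(dom, iova, hpa, size, IOMMU_READ | IOMMU_WRITE))
 *		goto out_detach;
 *	...
 *	iommu_unmap_range(dom, iova, size);
 * out_detach:
 *	iommu_detach_device(dom, &pdev->dev);
 * out_free:
 *	iommu_domain_free(dom);
 */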
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);