/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */
#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include "assigned-dev.h"
static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
                   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");
static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);
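/*
 * Pin the host pages backing the guest frames [gfn, gfn + npages) of a
 * memslot and return the pfn of the first page, or an error pfn when the
 * first translation fails; errors on the remaining pages are not checked.
 */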
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
                           unsigned long npages)
{
        gfn_t end_gfn;
        pfn_t pfn;

        pfn     = gfn_to_pfn_memslot(slot, gfn);
        end_gfn = gfn + npages;
        gfn    += 1;

        if (is_error_noslot_pfn(pfn))
                return pfn;

        while (gfn < end_gfn)
                gfn_to_pfn_memslot(slot, gfn++);

        return pfn;
}
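/*
 * Drop the reference that kvm_pin_pages() took on each of the npages pages
 * starting at pfn.
 */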
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; ++i)
                kvm_release_pfn_clean(pfn + i);
}
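/*
 * Create IOMMU mappings for every page of a memslot. Pages are pinned first
 * and mapped with the largest host page size that fits the memslot bounds
 * and the gfn/hva alignment; on failure, everything mapped so far is undone.
 */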
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        gfn_t gfn, end_gfn;
        pfn_t pfn;
        int r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int flags;

        /* check if iommu exists and is in use */
        if (!domain)
                return 0;

        gfn     = slot->base_gfn;
        end_gfn = gfn + slot->npages;

        flags = IOMMU_READ;
        if (!(slot->flags & KVM_MEM_READONLY))
                flags |= IOMMU_WRITE;
        if (!kvm->arch.iommu_noncoherent)
                flags |= IOMMU_CACHE;

        while (gfn < end_gfn) {
                unsigned long page_size;

                /* Check if already mapped */
                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
                        gfn += 1;
                        continue;
                }

                /* Get the page size we could use to map */
                page_size = kvm_host_page_size(kvm, gfn);

                /* Make sure the page_size does not exceed the memslot */
                while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
                        page_size >>= 1;

                /* Make sure gfn is aligned to the page size we want to map */
                while ((gfn << PAGE_SHIFT) & (page_size - 1))
                        page_size >>= 1;

                /* Make sure hva is aligned to the page size we want to map */
                while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
                        page_size >>= 1;

                /*
                 * Pin all pages we are about to map in memory. This is
                 * important because we unmap and unpin in 4kb steps later.
                 */
                pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
                if (is_error_noslot_pfn(pfn)) {
                        gfn += 1;
                        continue;
                }

                /* Map into IO address space */
                r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
                              page_size, flags);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address: "
                               "iommu failed to map pfn=%llx\n", pfn);
                        kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
                        goto unmap_pages;
                }

                gfn += page_size >> PAGE_SHIFT;
        }

        return 0;

unmap_pages:
        kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
        return r;
}
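/*
 * Map every memslot of the VM into the IOMMU domain, walking the memslot
 * list under SRCU. Registers noncoherent DMA with the arch first when the
 * domain lacks cache coherency.
 */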
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
        int idx, r = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        if (kvm->arch.iommu_noncoherent)
                kvm_arch_register_noncoherent_dma(kvm);

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots) {
                r = kvm_iommu_map_pages(kvm, memslot);
                if (r)
                        break;
        }

        srcu_read_unlock(&kvm->srcu, idx);

        return r;
}
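/*
 * Attach an assigned PCI device to the VM's IOMMU domain. If the device's
 * cache-coherency capability differs from the current mapping state, the
 * memslot mappings are rebuilt with an updated IOMMU_CACHE flag.
 */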
int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int r;
        bool noncoherent;

        /* check if iommu exists and is in use */
        if (!domain)
                return 0;

        if (pdev == NULL)
                return -ENODEV;

        r = iommu_attach_device(domain, &pdev->dev);
        if (r) {
                dev_err(&pdev->dev, "kvm assign device failed ret %d\n", r);
                return r;
        }

        noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);

        /* Check if we need to update the IOMMU page table for guest memory */
        if (noncoherent != kvm->arch.iommu_noncoherent) {
                kvm_iommu_unmap_memslots(kvm);
                kvm->arch.iommu_noncoherent = noncoherent;
                r = kvm_iommu_map_memslots(kvm);
                if (r)
                        goto out_unmap;
        }

        kvm_arch_start_assignment(kvm);
        pci_set_dev_assigned(pdev);

        dev_info(&pdev->dev, "kvm assign device\n");

        return 0;
out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}
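/*
 * Detach an assigned PCI device from the VM's IOMMU domain and clear its
 * assigned state.
 */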
int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and is in use */
        if (!domain)
                return 0;

        if (pdev == NULL)
                return -ENODEV;

        iommu_detach_device(domain, &pdev->dev);

        pci_clear_dev_assigned(pdev);
        kvm_arch_end_assignment(kvm);

        dev_info(&pdev->dev, "kvm deassign device\n");

        return 0;
}
int kvm_iommu_map_guest(struct kvm *kvm)
{
        int r;

        if (!iommu_present(&pci_bus_type)) {
                printk(KERN_ERR "%s: iommu not found\n", __func__);
                return -ENODEV;
        }

        mutex_lock(&kvm->slots_lock);

        kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
        if (!kvm->arch.iommu_domain) {
                r = -ENOMEM;
                goto out_unlock;
        }

        if (!allow_unsafe_assigned_interrupts &&
            !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
                printk(KERN_WARNING "%s: No interrupt remapping support,"
                       " disallowing device assignment."
                       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
                       " module option.\n", __func__);
                iommu_domain_free(kvm->arch.iommu_domain);
                kvm->arch.iommu_domain = NULL;
                r = -EPERM;
                goto out_unlock;
        }

        r = kvm_iommu_map_memslots(kvm);
        if (r)
                kvm_iommu_unmap_memslots(kvm);

out_unlock:
        mutex_unlock(&kvm->slots_lock);
        return r;
}
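/*
 * Unmap npages guest frames starting at base_gfn from the IOMMU domain and
 * unpin the backing host pages. iommu_unmap() may tear down a large page in
 * one call, so advance by the number of 4k pages actually unmapped.
 */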
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
{
        struct iommu_domain *domain;
        gfn_t end_gfn, gfn;
        pfn_t pfn;
        u64 phys;

        domain  = kvm->arch.iommu_domain;
        end_gfn = base_gfn + npages;
        gfn     = base_gfn;

        /* check if iommu exists and is in use */
        if (!domain)
                return;

        while (gfn < end_gfn) {
                unsigned long unmap_pages;
                size_t size;

                /* Get the physical address backing this guest frame */
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));

                if (!phys) {
                        gfn++;
                        continue;
                }

                pfn = phys >> PAGE_SHIFT;

                /* Unmap the address from the IO address space */
                size        = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
                unmap_pages = 1ULL << get_order(size);

                /* Unpin all pages we just unmapped to not leak any memory */
                kvm_unpin_pages(kvm, pfn, unmap_pages);

                gfn += unmap_pages;
        }
}
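/*
 * Tear down the IOMMU mappings that cover a single memslot.
 */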
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}
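/*
 * Unmap every memslot of the VM from the IOMMU domain, walking the memslot
 * list under SRCU, and unregister noncoherent DMA when it was registered.
 */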
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
        int idx;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots)
                kvm_iommu_unmap_pages(kvm, memslot);

        srcu_read_unlock(&kvm->srcu, idx);

        if (kvm->arch.iommu_noncoherent)
                kvm_arch_unregister_noncoherent_dma(kvm);

        return 0;
}
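/*
 * Final teardown: unmap all memslots, detach the domain from the VM under
 * slots_lock, and free it.
 */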
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and is in use */
        if (!domain)
                return 0;

        mutex_lock(&kvm->slots_lock);
        kvm_iommu_unmap_memslots(kvm);
        kvm->arch.iommu_domain = NULL;
        kvm->arch.iommu_noncoherent = false;
        mutex_unlock(&kvm->slots_lock);

        iommu_domain_free(domain);
        return 0;
}