/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */
#include "i915_pvinfo.h"

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
    if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
            && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
        gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
                addr, size);
        return false;
    }
    return true;
}
/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
    if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
            "invalid guest gmadr %llx\n", g_addr))
        return -EACCES;

    if (vgpu_gmadr_is_aperture(vgpu, g_addr))
        *h_addr = vgpu_aperture_gmadr_base(vgpu)
              + (g_addr - vgpu_aperture_offset(vgpu));
    else
        *h_addr = vgpu_hidden_gmadr_base(vgpu)
              + (g_addr - vgpu_hidden_offset(vgpu));
    return 0;
}
/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
    if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
            "invalid host gmadr %llx\n", h_addr))
        return -EACCES;

    if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
        *g_addr = vgpu_aperture_gmadr_base(vgpu)
              + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
    else
        *g_addr = vgpu_hidden_gmadr_base(vgpu)
              + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
    return 0;
}
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
                 unsigned long *h_index)
{
    u64 h_addr;
    int ret;

    ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
                       &h_addr);
    if (ret)
        return ret;

    *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
    return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
                 unsigned long *g_index)
{
    u64 g_addr;
    int ret;

    ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
                       &g_addr);
    if (ret)
        return ret;

    *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
    return 0;
}
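
/*
 * Illustrative arithmetic for the helpers above (the numbers are made up,
 * not taken from the driver): assuming vgpu_aperture_gmadr_base() returned
 * 0x200000 and vgpu_aperture_offset() returned 0x100000, a guest aperture
 * address g_addr = 0x105000 would translate as
 *
 *    h_addr = 0x200000 + (0x105000 - 0x100000) = 0x205000;
 *
 * and intel_gvt_ggtt_gmadr_h2g() inverts the same offset arithmetic. The
 * index variants simply shift the addresses by I915_GTT_PAGE_SHIFT before
 * and after the translation.
 */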
#define gtt_type_is_entry(type) \
    (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
     && type != GTT_TYPE_PPGTT_PTE_ENTRY \
     && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
    (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
    (type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
    (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
    (e)->type = t; \
    (e)->pdev = p; \
    memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)
/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be found for a given type:
 * - type of next level page table
 * - type of entry inside this level page table
 * - type of entry with PSE set
 *
 * If the given type doesn't have such information (e.g. an l4 root entry
 * has no PSE bit, and a PTE page table has no next-level page table type),
 * GTT_TYPE_INVALID will be returned. This is useful when traversing a
 * page table.
 */
struct gtt_type_table_entry {
    int entry_type;
    int pt_type;
    int next_pt_type;
    int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
    [type] = { \
        .entry_type = e_type, \
        .pt_type = cpt_type, \
        .next_pt_type = npt_type, \
        .pse_entry_type = pse_type, \
    }
static struct gtt_type_table_entry gtt_type_table[] = {
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
            GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
            GTT_TYPE_INVALID,
            GTT_TYPE_PPGTT_PML4_PT,
            GTT_TYPE_INVALID),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
            GTT_TYPE_PPGTT_PML4_ENTRY,
            GTT_TYPE_PPGTT_PML4_PT,
            GTT_TYPE_PPGTT_PDP_PT,
            GTT_TYPE_INVALID),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
            GTT_TYPE_PPGTT_PML4_ENTRY,
            GTT_TYPE_PPGTT_PML4_PT,
            GTT_TYPE_PPGTT_PDP_PT,
            GTT_TYPE_INVALID),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
            GTT_TYPE_PPGTT_PDP_ENTRY,
            GTT_TYPE_PPGTT_PDP_PT,
            GTT_TYPE_PPGTT_PDE_PT,
            GTT_TYPE_PPGTT_PTE_1G_ENTRY),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
            GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
            GTT_TYPE_INVALID,
            GTT_TYPE_PPGTT_PDE_PT,
            GTT_TYPE_PPGTT_PTE_1G_ENTRY),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
            GTT_TYPE_PPGTT_PDP_ENTRY,
            GTT_TYPE_PPGTT_PDP_PT,
            GTT_TYPE_PPGTT_PDE_PT,
            GTT_TYPE_PPGTT_PTE_1G_ENTRY),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
            GTT_TYPE_PPGTT_PDE_ENTRY,
            GTT_TYPE_PPGTT_PDE_PT,
            GTT_TYPE_PPGTT_PTE_PT,
            GTT_TYPE_PPGTT_PTE_2M_ENTRY),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
            GTT_TYPE_PPGTT_PDE_ENTRY,
            GTT_TYPE_PPGTT_PDE_PT,
            GTT_TYPE_PPGTT_PTE_PT,
            GTT_TYPE_PPGTT_PTE_2M_ENTRY),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
            GTT_TYPE_PPGTT_PTE_4K_ENTRY,
            GTT_TYPE_PPGTT_PTE_PT,
            GTT_TYPE_INVALID,
            GTT_TYPE_INVALID),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
            GTT_TYPE_PPGTT_PTE_4K_ENTRY,
            GTT_TYPE_PPGTT_PTE_PT,
            GTT_TYPE_INVALID,
            GTT_TYPE_INVALID),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
            GTT_TYPE_PPGTT_PDE_ENTRY,
            GTT_TYPE_PPGTT_PDE_PT,
            GTT_TYPE_INVALID,
            GTT_TYPE_PPGTT_PTE_2M_ENTRY),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
            GTT_TYPE_PPGTT_PDP_ENTRY,
            GTT_TYPE_PPGTT_PDP_PT,
            GTT_TYPE_INVALID,
            GTT_TYPE_PPGTT_PTE_1G_ENTRY),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
            GTT_TYPE_GGTT_PTE,
            GTT_TYPE_INVALID,
            GTT_TYPE_INVALID,
            GTT_TYPE_INVALID),
};
static inline int get_next_pt_type(int type)
{
    return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
    return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
    return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
    return gtt_type_table[type].pse_entry_type;
}
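
/*
 * A few sample lookups, following directly from the table above:
 *
 *    get_next_pt_type(GTT_TYPE_PPGTT_PDP_PT)    == GTT_TYPE_PPGTT_PDE_PT
 *    get_entry_type(GTT_TYPE_PPGTT_PDE_PT)      == GTT_TYPE_PPGTT_PDE_ENTRY
 *    get_pse_type(GTT_TYPE_PPGTT_PDE_ENTRY)     == GTT_TYPE_PPGTT_PTE_2M_ENTRY
 *    get_pse_type(GTT_TYPE_PPGTT_ROOT_L4_ENTRY) == GTT_TYPE_INVALID
 */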
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
    void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

    return readq(addr);
}

static void gtt_invalidate(struct drm_i915_private *dev_priv)
{
    mmio_hw_access_pre(dev_priv);
    I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
    mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
        unsigned long index, u64 pte)
{
    void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

    writeq(pte, addr);
}
static inline int gtt_get_entry64(void *pt,
        struct intel_gvt_gtt_entry *e,
        unsigned long index, bool hypervisor_access, unsigned long gpa,
        struct intel_vgpu *vgpu)
{
    const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
    int ret;

    if (WARN_ON(info->gtt_entry_size != 8))
        return -EINVAL;

    if (hypervisor_access) {
        ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
                (index << info->gtt_entry_size_shift),
                &e->val64, 8);
        if (WARN_ON(ret))
            return ret;
    } else if (!pt) {
        e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
    } else {
        e->val64 = *((u64 *)pt + index);
    }
    return 0;
}

static inline int gtt_set_entry64(void *pt,
        struct intel_gvt_gtt_entry *e,
        unsigned long index, bool hypervisor_access, unsigned long gpa,
        struct intel_vgpu *vgpu)
{
    const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
    int ret;

    if (WARN_ON(info->gtt_entry_size != 8))
        return -EINVAL;

    if (hypervisor_access) {
        ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
                (index << info->gtt_entry_size_shift),
                &e->val64, 8);
        if (WARN_ON(ret))
            return ret;
    } else if (!pt) {
        write_pte64(vgpu->gvt->dev_priv, index, e->val64);
    } else {
        *((u64 *)pt + index) = e->val64;
    }
    return 0;
}
#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
    unsigned long pfn;

    if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
        pfn = (e->val64 & ADDR_1G_MASK) >> 12;
    else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
        pfn = (e->val64 & ADDR_2M_MASK) >> 12;
    else
        pfn = (e->val64 & ADDR_4K_MASK) >> 12;
    return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
    if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
        e->val64 &= ~ADDR_1G_MASK;
        pfn &= (ADDR_1G_MASK >> 12);
    } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
        e->val64 &= ~ADDR_2M_MASK;
        pfn &= (ADDR_2M_MASK >> 12);
    } else {
        e->val64 &= ~ADDR_4K_MASK;
        pfn &= (ADDR_4K_MASK >> 12);
    }

    e->val64 |= (pfn << 12);
}
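
/*
 * Example of the pfn arithmetic above (hypothetical entry value): for a 4K
 * entry with e->val64 == 0x0000000123456007, ADDR_4K_MASK strips the low
 * flag bits and anything above the hardware address width, so
 *
 *    pfn = (0x0000000123456007 & ADDR_4K_MASK) >> 12 == 0x123456;
 *
 * gen8_gtt_set_pfn() is the inverse: clear the address field that matches
 * the entry size, then or in (pfn << 12).
 */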
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
    /* Entry doesn't have PSE bit. */
    if (get_pse_type(e->type) == GTT_TYPE_INVALID)
        return false;

    e->type = get_entry_type(e->type);
    if (!(e->val64 & BIT(7)))
        return false;

    e->type = get_pse_type(e->type);
    return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
    /*
     * i915 writes PDP root pointer registers without present bit,
     * it also works, so we need to treat root pointer entry
     * specifically.
     */
    if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
            || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
        return (e->val64 != 0);
    else
        return (e->val64 & BIT(0));
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
    e->val64 &= ~BIT(0);
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
    e->val64 |= BIT(0);
}
/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
    unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

    trace_gma_index(__func__, gma, x);
    return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
    unsigned long x = (exp); \
    trace_gma_index(__func__, gma, x); \
    return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
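
/*
 * Worked example of the index helpers above for a hypothetical
 * gma == 0x7f1234567000:
 *
 *    gen8_gma_to_pml4_index(gma)   == 0x0fe   (gma >> 39 & 0x1ff)
 *    gen8_gma_to_l4_pdp_index(gma) == 0x048   (gma >> 30 & 0x1ff)
 *    gen8_gma_to_pde_index(gma)    == 0x1a2   (gma >> 21 & 0x1ff)
 *    gen8_gma_to_pte_index(gma)    == 0x167   (gma >> 12 & 0x1ff)
 *
 * The remaining low 12 bits are the offset inside the 4K page.
 */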
static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
    .get_entry = gtt_get_entry64,
    .set_entry = gtt_set_entry64,
    .clear_present = gtt_entry_clear_present,
    .set_present = gtt_entry_set_present,
    .test_present = gen8_gtt_test_present,
    .test_pse = gen8_gtt_test_pse,
    .get_pfn = gen8_gtt_get_pfn,
    .set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
    .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
    .gma_to_pte_index = gen8_gma_to_pte_index,
    .gma_to_pde_index = gen8_gma_to_pde_index,
    .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
    .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
    .gma_to_pml4_index = gen8_gma_to_pml4_index,
};
static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
        struct intel_gvt_gtt_entry *m)
{
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    unsigned long gfn, mfn;

    *m = *p;

    if (!ops->test_present(p))
        return 0;

    gfn = ops->get_pfn(p);

    mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
    if (mfn == INTEL_GVT_INVALID_ADDR) {
        gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
        return -ENXIO;
    }

    ops->set_pfn(m, mfn);
    return 0;
}
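
/*
 * Conceptual sketch of gtt_entry_p2m() (a restatement of the code above,
 * not additional driver logic): the shadow entry keeps every flag bit of
 * the guest entry and only the address field is rewritten:
 *
 *    *m = *p;                                  // copy type and flag bits
 *    mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(p));
 *    ops->set_pfn(m, mfn);                     // guest pfn -> machine pfn
 */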
int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
        void *page_table, struct intel_gvt_gtt_entry *e,
        unsigned long index)
{
    struct intel_gvt *gvt = mm->vgpu->gvt;
    struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
    int ret;

    e->type = mm->page_table_entry_type;

    ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
    if (ret)
        return ret;

    ops->test_pse(e);
    return 0;
}

int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
        void *page_table, struct intel_gvt_gtt_entry *e,
        unsigned long index)
{
    struct intel_gvt *gvt = mm->vgpu->gvt;
    struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

    return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
}
/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
        struct intel_vgpu_ppgtt_spt *spt,
        void *page_table, int type,
        struct intel_gvt_gtt_entry *e, unsigned long index,
        bool guest)
{
    struct intel_gvt *gvt = spt->vgpu->gvt;
    struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
    int ret;

    e->type = get_entry_type(type);

    if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
        return -EINVAL;

    ret = ops->get_entry(page_table, e, index, guest,
            spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
            spt->vgpu);
    if (ret)
        return ret;

    ops->test_pse(e);
    return 0;
}

static inline int ppgtt_spt_set_entry(
        struct intel_vgpu_ppgtt_spt *spt,
        void *page_table, int type,
        struct intel_gvt_gtt_entry *e, unsigned long index,
        bool guest)
{
    struct intel_gvt *gvt = spt->vgpu->gvt;
    struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

    if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
        return -EINVAL;

    return ops->set_entry(page_table, e, index, guest,
            spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
            spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
    ppgtt_spt_get_entry(spt, NULL, \
        spt->guest_page_type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
    ppgtt_spt_set_entry(spt, NULL, \
        spt->guest_page_type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
    ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
        spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
    ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
        spt->shadow_page.type, e, index, false)
/**
 * intel_vgpu_init_page_track - init a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 * @gfn: guest memory page frame number
 * @handler: the function will be called when target guest memory page has
 * been modified.
 *
 * This function is called when a user wants to prepare a page track data
 * structure to track a guest memory page.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
        struct intel_vgpu_page_track *t,
        unsigned long gfn,
        int (*handler)(void *, u64, void *, int),
        void *data)
{
    INIT_HLIST_NODE(&t->node);

    t->tracked = false;
    t->gfn = gfn;
    t->handler = handler;
    t->data = data;

    hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
    return 0;
}

/**
 * intel_vgpu_clean_page_track - release a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 *
 * This function is called before a user frees a page track data structure.
 */
void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
        struct intel_vgpu_page_track *t)
{
    if (!hlist_unhashed(&t->node))
        hash_del(&t->node);

    if (t->tracked)
        intel_gvt_hypervisor_disable_page_track(vgpu, t);
}

/**
 * intel_vgpu_find_tracked_page - find a tracked guest page
 * @vgpu: a vGPU
 * @gfn: guest memory page frame number
 *
 * This function is called when the emulation layer wants to figure out if a
 * trapped GFN is a tracked guest page.
 *
 * Returns:
 * Pointer to page track data structure, NULL if not found.
 */
struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
        struct intel_vgpu *vgpu, unsigned long gfn)
{
    struct intel_vgpu_page_track *t;

    hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
            t, node, gfn) {
        if (t->gfn == gfn)
            return t;
    }
    return NULL;
}
static int init_guest_page(struct intel_vgpu *vgpu,
        struct intel_vgpu_guest_page *p,
        unsigned long gfn,
        int (*handler)(void *, u64, void *, int),
        void *data)
{
    p->oos_page = NULL;
    p->write_cnt = 0;

    return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
        struct intel_vgpu_oos_page *oos_page);

static void clean_guest_page(struct intel_vgpu *vgpu,
        struct intel_vgpu_guest_page *p)
{
    if (p->oos_page)
        detach_oos_page(vgpu, p->oos_page);

    intel_vgpu_clean_page_track(vgpu, &p->track);
}
static inline int init_shadow_page(struct intel_vgpu *vgpu,
        struct intel_vgpu_shadow_page *p, int type, bool hash)
{
    struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
    dma_addr_t daddr;

    daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
    if (dma_mapping_error(kdev, daddr)) {
        gvt_vgpu_err("fail to map dma addr\n");
        return -EINVAL;
    }

    p->vaddr = page_address(p->page);
    p->type = type;

    INIT_HLIST_NODE(&p->node);

    p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
    if (hash)
        hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
    return 0;
}

static inline void clean_shadow_page(struct intel_vgpu *vgpu,
        struct intel_vgpu_shadow_page *p)
{
    struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;

    dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
            PCI_DMA_BIDIRECTIONAL);

    if (!hlist_unhashed(&p->node))
        hash_del(&p->node);
}

static inline struct intel_vgpu_shadow_page *find_shadow_page(
        struct intel_vgpu *vgpu, unsigned long mfn)
{
    struct intel_vgpu_shadow_page *p;

    hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
            p, node, mfn) {
        if (p->mfn == mfn)
            return p;
    }
    return NULL;
}

#define page_track_to_guest_page(ptr) \
    container_of(ptr, struct intel_vgpu_guest_page, track)

#define guest_page_to_ppgtt_spt(ptr) \
    container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)

#define shadow_page_to_ppgtt_spt(ptr) \
    container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
static void *alloc_spt(gfp_t gfp_mask)
{
    struct intel_vgpu_ppgtt_spt *spt;

    spt = kzalloc(sizeof(*spt), gfp_mask);
    if (!spt)
        return NULL;

    spt->shadow_page.page = alloc_page(gfp_mask);
    if (!spt->shadow_page.page) {
        kfree(spt);
        return NULL;
    }
    return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
    __free_page(spt->shadow_page.page);
    kfree(spt);
}

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
    trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

    clean_shadow_page(spt->vgpu, &spt->shadow_page);
    clean_guest_page(spt->vgpu, &spt->guest_page);
    list_del_init(&spt->post_shadow_list);

    free_spt(spt);
}
static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
    struct hlist_node *n;
    struct intel_vgpu_shadow_page *sp;
    int i;

    hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
        ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}
static int ppgtt_handle_guest_write_page_table_bytes(
        struct intel_vgpu_guest_page *gpt,
        u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *data, u64 pa,
        void *p_data, int bytes)
{
    struct intel_vgpu_page_track *t = data;
    struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
    int ret;

    if (bytes != 4 && bytes != 8)
        return -EINVAL;

    if (!t->tracked)
        return -EINVAL;

    ret = ppgtt_handle_guest_write_page_table_bytes(p,
            pa, p_data, bytes);
    if (ret)
        return ret;
    return ret;
}
static int reclaim_one_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
        struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
    struct intel_vgpu_ppgtt_spt *spt = NULL;
    int ret;

retry:
    spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
    if (!spt) {
        if (reclaim_one_mm(vgpu->gvt))
            goto retry;

        gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
        return ERR_PTR(-ENOMEM);
    }

    spt->vgpu = vgpu;
    spt->guest_page_type = type;
    atomic_set(&spt->refcount, 1);
    INIT_LIST_HEAD(&spt->post_shadow_list);

    /*
     * TODO: the guest page type may be different from the shadow page
     * type, once we support PSE pages in the future.
     */
    ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
    if (ret) {
        gvt_vgpu_err("fail to initialize shadow page for spt\n");
        goto err;
    }

    ret = init_guest_page(vgpu, &spt->guest_page,
            gfn, ppgtt_write_protection_handler, NULL);
    if (ret) {
        gvt_vgpu_err("fail to initialize guest page for spt\n");
        goto err;
    }

    trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
    return spt;
err:
    ppgtt_free_shadow_page(spt);
    return ERR_PTR(ret);
}
static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
        struct intel_vgpu *vgpu, unsigned long mfn)
{
    struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);

    if (p)
        return shadow_page_to_ppgtt_spt(p);

    gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
    return NULL;
}
#define pt_entry_size_shift(spt) \
    ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
    (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
    for (i = 0; i < pt_entries(spt); i++) \
        if (!ppgtt_get_guest_entry(spt, e, i) && \
            spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
    for (i = 0; i < pt_entries(spt); i++) \
        if (!ppgtt_get_shadow_entry(spt, e, i) && \
            spt->vgpu->gvt->gtt.pte_ops->test_present(e))

static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
    int v = atomic_read(&spt->refcount);

    trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

    atomic_inc(&spt->refcount);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
        struct intel_gvt_gtt_entry *e)
{
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    struct intel_vgpu_ppgtt_spt *s;
    intel_gvt_gtt_type_t cur_pt_type;

    if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
        return -EINVAL;

    if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
        && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
        cur_pt_type = get_next_pt_type(e->type) + 1;
        if (ops->get_pfn(e) ==
            vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
            return 0;
    }
    s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
    if (!s) {
        gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
                ops->get_pfn(e));
        return -ENXIO;
    }
    return ppgtt_invalidate_shadow_page(s);
}
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
    struct intel_vgpu *vgpu = spt->vgpu;
    struct intel_gvt_gtt_entry e;
    unsigned long index;
    int ret;
    int v = atomic_read(&spt->refcount);

    trace_spt_change(spt->vgpu->id, "die", spt,
            spt->guest_page.track.gfn, spt->shadow_page.type);

    trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

    if (atomic_dec_return(&spt->refcount) > 0)
        return 0;

    if (gtt_type_is_pte_pt(spt->shadow_page.type))
        goto release;

    for_each_present_shadow_entry(spt, &e, index) {
        if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
            gvt_vgpu_err("GVT doesn't support pse bit for now\n");
            return -EINVAL;
        }
        ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
                vgpu, &e);
        if (ret)
            goto fail;
    }
release:
    trace_spt_change(spt->vgpu->id, "release", spt,
            spt->guest_page.track.gfn, spt->shadow_page.type);
    ppgtt_free_shadow_page(spt);
    return 0;
fail:
    gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
            spt, e.val64, e.type);
    return ret;
}
static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
        struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    struct intel_vgpu_ppgtt_spt *s = NULL;
    struct intel_vgpu_guest_page *g;
    struct intel_vgpu_page_track *t;
    int ret;

    if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
        ret = -EINVAL;
        goto fail;
    }

    t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
    if (t) {
        g = page_track_to_guest_page(t);
        s = guest_page_to_ppgtt_spt(g);
        ppgtt_get_shadow_page(s);
    } else {
        int type = get_next_pt_type(we->type);

        s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
        if (IS_ERR(s)) {
            ret = PTR_ERR(s);
            goto fail;
        }

        ret = intel_gvt_hypervisor_enable_page_track(vgpu,
                &s->guest_page.track);
        if (ret)
            goto fail;

        ret = ppgtt_populate_shadow_page(s);
        if (ret)
            goto fail;

        trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
            s->shadow_page.type);
    }
    return s;
fail:
    gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
            s, we->val64, we->type);
    return ERR_PTR(ret);
}
static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
        struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
    struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

    se->type = ge->type;
    se->val64 = ge->val64;

    ops->set_pfn(se, s->shadow_page.mfn);
}
static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
    struct intel_vgpu *vgpu = spt->vgpu;
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
    struct intel_vgpu_ppgtt_spt *s;
    struct intel_gvt_gtt_entry se, ge;
    unsigned long gfn, i;
    int ret;

    trace_spt_change(spt->vgpu->id, "born", spt,
            spt->guest_page.track.gfn, spt->shadow_page.type);

    if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
        for_each_present_guest_entry(spt, &ge, i) {
            gfn = ops->get_pfn(&ge);
            if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
                gtt_entry_p2m(vgpu, &ge, &se))
                ops->set_pfn(&se, gvt->gtt.scratch_mfn);
            ppgtt_set_shadow_entry(spt, &se, i);
        }
        return 0;
    }

    for_each_present_guest_entry(spt, &ge, i) {
        if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
            gvt_vgpu_err("GVT doesn't support pse bit now\n");
            ret = -EINVAL;
            goto fail;
        }

        s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
        if (IS_ERR(s)) {
            ret = PTR_ERR(s);
            goto fail;
        }
        ppgtt_get_shadow_entry(spt, &se, i);
        ppgtt_generate_shadow_entry(&se, s, &ge);
        ppgtt_set_shadow_entry(spt, &se, i);
    }
    return 0;
fail:
    gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
            spt, ge.val64, ge.type);
    return ret;
}
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
        struct intel_gvt_gtt_entry *se, unsigned long index)
{
    struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
    struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
    struct intel_vgpu *vgpu = spt->vgpu;
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    int ret;

    trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
            index);

    if (!ops->test_present(se))
        return 0;

    if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
        return 0;

    if (gtt_type_is_pt(get_next_pt_type(se->type))) {
        struct intel_vgpu_ppgtt_spt *s =
            ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
        if (!s) {
            gvt_vgpu_err("fail to find guest page\n");
            ret = -ENXIO;
            goto fail;
        }
        ret = ppgtt_invalidate_shadow_page(s);
        if (ret)
            goto fail;
    }
    return 0;
fail:
    gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
            spt, se->val64, se->type);
    return ret;
}
static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
        struct intel_gvt_gtt_entry *we, unsigned long index)
{
    struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
    struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
    struct intel_vgpu *vgpu = spt->vgpu;
    struct intel_gvt_gtt_entry m;
    struct intel_vgpu_ppgtt_spt *s;
    int ret;

    trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
            we->val64, index);

    if (gtt_type_is_pt(get_next_pt_type(we->type))) {
        s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
        if (IS_ERR(s)) {
            ret = PTR_ERR(s);
            goto fail;
        }
        ppgtt_get_shadow_entry(spt, &m, index);
        ppgtt_generate_shadow_entry(&m, s, we);
        ppgtt_set_shadow_entry(spt, &m, index);
    } else {
        ret = gtt_entry_p2m(vgpu, we, &m);
        if (ret)
            goto fail;
        ppgtt_set_shadow_entry(spt, &m, index);
    }
    return 0;
fail:
    gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
        spt, we->val64, we->type);
    return ret;
}
static int sync_oos_page(struct intel_vgpu *vgpu,
        struct intel_vgpu_oos_page *oos_page)
{
    const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
    struct intel_vgpu_ppgtt_spt *spt =
        guest_page_to_ppgtt_spt(oos_page->guest_page);
    struct intel_gvt_gtt_entry old, new, m;
    int index;
    int ret;

    trace_oos_change(vgpu->id, "sync", oos_page->id,
            oos_page->guest_page, spt->guest_page_type);

    old.type = new.type = get_entry_type(spt->guest_page_type);
    old.val64 = new.val64 = 0;

    for (index = 0; index < (I915_GTT_PAGE_SIZE >>
                info->gtt_entry_size_shift); index++) {
        ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
        ops->get_entry(NULL, &new, index, true,
            oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);

        if (old.val64 == new.val64
            && !test_and_clear_bit(index, spt->post_shadow_bitmap))
            continue;

        trace_oos_sync(vgpu->id, oos_page->id,
                oos_page->guest_page, spt->guest_page_type,
                new.val64, index);

        ret = gtt_entry_p2m(vgpu, &new, &m);
        if (ret)
            return ret;

        ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
        ppgtt_set_shadow_entry(spt, &m, index);
    }

    oos_page->guest_page->write_cnt = 0;
    list_del_init(&spt->post_shadow_list);
    return 0;
}
static int detach_oos_page(struct intel_vgpu *vgpu,
        struct intel_vgpu_oos_page *oos_page)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_vgpu_ppgtt_spt *spt =
        guest_page_to_ppgtt_spt(oos_page->guest_page);

    trace_oos_change(vgpu->id, "detach", oos_page->id,
            oos_page->guest_page, spt->guest_page_type);

    oos_page->guest_page->write_cnt = 0;
    oos_page->guest_page->oos_page = NULL;
    oos_page->guest_page = NULL;

    list_del_init(&oos_page->vm_list);
    list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

    return 0;
}
static int attach_oos_page(struct intel_vgpu *vgpu,
        struct intel_vgpu_oos_page *oos_page,
        struct intel_vgpu_guest_page *gpt)
{
    struct intel_gvt *gvt = vgpu->gvt;
    int ret;

    ret = intel_gvt_hypervisor_read_gpa(vgpu,
            gpt->track.gfn << I915_GTT_PAGE_SHIFT,
            oos_page->mem, I915_GTT_PAGE_SIZE);
    if (ret)
        return ret;

    oos_page->guest_page = gpt;
    gpt->oos_page = oos_page;

    list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

    trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
            gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
    return 0;
}
static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
        struct intel_vgpu_guest_page *gpt)
{
    int ret;

    ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
    if (ret)
        return ret;

    trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
            gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

    list_del_init(&gpt->oos_page->vm_list);
    return sync_oos_page(vgpu, gpt->oos_page);
}
static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
        struct intel_vgpu_guest_page *gpt)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_gtt *gtt = &gvt->gtt;
    struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
    int ret;

    WARN(oos_page, "shadow PPGTT page already has an oos page\n");

    if (list_empty(&gtt->oos_page_free_list_head)) {
        oos_page = container_of(gtt->oos_page_use_list_head.next,
            struct intel_vgpu_oos_page, list);
        ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
        if (ret)
            return ret;
        ret = detach_oos_page(vgpu, oos_page);
        if (ret)
            return ret;
    }
    oos_page = container_of(gtt->oos_page_free_list_head.next,
        struct intel_vgpu_oos_page, list);
    return attach_oos_page(vgpu, oos_page, gpt);
}
static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
        struct intel_vgpu_guest_page *gpt)
{
    struct intel_vgpu_oos_page *oos_page = gpt->oos_page;

    if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
        return -EINVAL;

    trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
            gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

    list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
    return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
}
/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-synced shadow for vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to sync all the out-of-synced shadow for vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
    struct list_head *pos, *n;
    struct intel_vgpu_oos_page *oos_page;
    int ret;

    if (!enable_out_of_sync)
        return 0;

    list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
        oos_page = container_of(pos,
                struct intel_vgpu_oos_page, vm_list);
        ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
        if (ret)
            return ret;
    }
    return 0;
}
/*
 * The heart of PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
        struct intel_vgpu_guest_page *gpt,
        struct intel_gvt_gtt_entry *we, unsigned long index)
{
    struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
    struct intel_vgpu *vgpu = spt->vgpu;
    int type = spt->shadow_page.type;
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    struct intel_gvt_gtt_entry se;
    int ret;
    int new_present;

    new_present = ops->test_present(we);

    /*
     * Add the new entry first and then remove the old one, so that the
     * ppgtt table stays valid during the window between the add and the
     * removal.
     */
    ppgtt_get_shadow_entry(spt, &se, index);

    if (new_present) {
        ret = ppgtt_handle_guest_entry_add(gpt, we, index);
        if (ret)
            goto fail;
    }

    ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
    if (ret)
        goto fail;

    if (!new_present) {
        ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
        ppgtt_set_shadow_entry(spt, &se, index);
    }

    return 0;
fail:
    gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
            spt, we->val64, we->type);
    return ret;
}
static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
{
    return enable_out_of_sync
        && gtt_type_is_pte_pt(
            guest_page_to_ppgtt_spt(gpt)->guest_page_type)
        && gpt->write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
        unsigned long index)
{
    set_bit(index, spt->post_shadow_bitmap);
    if (!list_empty(&spt->post_shadow_list))
        return;

    list_add_tail(&spt->post_shadow_list,
            &spt->vgpu->gtt.post_shadow_list_head);
}
/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
    struct list_head *pos, *n;
    struct intel_vgpu_ppgtt_spt *spt;
    struct intel_gvt_gtt_entry ge;
    unsigned long index;
    int ret;

    list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
        spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
                post_shadow_list);

        for_each_set_bit(index, spt->post_shadow_bitmap,
                GTT_ENTRY_NUM_IN_ONE_PAGE) {
            ppgtt_get_guest_entry(spt, &ge, index);

            ret = ppgtt_handle_guest_write_page_table(
                    &spt->guest_page, &ge, index);
            if (ret)
                return ret;
            clear_bit(index, spt->post_shadow_bitmap);
        }
        list_del_init(&spt->post_shadow_list);
    }
    return 0;
}
static int ppgtt_handle_guest_write_page_table_bytes(
        struct intel_vgpu_guest_page *gpt,
        u64 pa, void *p_data, int bytes)
{
    struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
    struct intel_vgpu *vgpu = spt->vgpu;
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
    struct intel_gvt_gtt_entry we, se;
    unsigned long index;
    int ret;

    index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

    ppgtt_get_guest_entry(spt, &we, index);

    if (bytes == info->gtt_entry_size) {
        ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
        if (ret)
            return ret;
    } else {
        if (!test_bit(index, spt->post_shadow_bitmap)) {
            int type = spt->shadow_page.type;

            ppgtt_get_shadow_entry(spt, &se, index);
            ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
            if (ret)
                return ret;
            ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
            ppgtt_set_shadow_entry(spt, &se, index);
        }
        ppgtt_set_post_shadow(spt, index);
    }

    if (!enable_out_of_sync)
        return 0;

    gpt->write_cnt++;

    if (gpt->oos_page)
        ops->set_entry(gpt->oos_page->mem, &we, index,
                false, 0, vgpu);

    if (can_do_out_of_sync(gpt)) {
        if (!gpt->oos_page)
            ppgtt_allocate_oos_page(vgpu, gpt);

        ret = ppgtt_set_guest_page_oos(vgpu, gpt);
        if (ret)
            return ret;
    }
    return 0;
}
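
/*
 * Timeline of the out-of-sync path above (illustrative sketch, assuming
 * enable_out_of_sync is set):
 *
 *    1st write to a tracked PTE page  -> write_cnt == 1, the page stays
 *                                        write-protected and the write is
 *                                        shadowed (or queued as a post
 *                                        shadow for a partial write)
 *    2nd write                        -> can_do_out_of_sync() is true, an
 *                                        oos page caches the guest table and
 *                                        write protection is dropped
 *    before the next workload         -> intel_vgpu_sync_oos_pages() replays
 *                                        the changed entries into the shadow
 *                                        table and re-enables tracking
 */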
/*
 * mm page table allocation policy for bdw+
 *  - for ggtt, only virtual page table will be allocated.
 *  - for ppgtt, dedicated virtual/shadow page table will be allocated.
 */
static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
{
    struct intel_vgpu *vgpu = mm->vgpu;
    struct intel_gvt *gvt = vgpu->gvt;
    const struct intel_gvt_device_info *info = &gvt->device_info;
    void *mem;

    if (mm->type == INTEL_GVT_MM_PPGTT) {
        mm->page_table_entry_cnt = 4;
        mm->page_table_entry_size = mm->page_table_entry_cnt *
            info->gtt_entry_size;
        mem = kzalloc(mm->has_shadow_page_table ?
            mm->page_table_entry_size * 2
                : mm->page_table_entry_size, GFP_KERNEL);
        if (!mem)
            return -ENOMEM;
        mm->virtual_page_table = mem;
        if (!mm->has_shadow_page_table)
            return 0;
        mm->shadow_page_table = mem + mm->page_table_entry_size;
    } else if (mm->type == INTEL_GVT_MM_GGTT) {
        mm->page_table_entry_cnt =
            (gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
        mm->page_table_entry_size = mm->page_table_entry_cnt *
            info->gtt_entry_size;
        mem = vzalloc(mm->page_table_entry_size);
        if (!mem)
            return -ENOMEM;
        mm->virtual_page_table = mem;
    }
    return 0;
}
static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
{
    if (mm->type == INTEL_GVT_MM_PPGTT) {
        kfree(mm->virtual_page_table);
    } else if (mm->type == INTEL_GVT_MM_GGTT) {
        if (mm->virtual_page_table)
            vfree(mm->virtual_page_table);
    }
    mm->virtual_page_table = mm->shadow_page_table = NULL;
}
static void invalidate_mm(struct intel_vgpu_mm *mm)
{
    struct intel_vgpu *vgpu = mm->vgpu;
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_gtt *gtt = &gvt->gtt;
    struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
    struct intel_gvt_gtt_entry se;
    int i;

    if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
        return;

    for (i = 0; i < mm->page_table_entry_cnt; i++) {
        ppgtt_get_shadow_root_entry(mm, &se, i);
        if (!ops->test_present(&se))
            continue;
        ppgtt_invalidate_shadow_page_by_shadow_entry(
                vgpu, &se);
        se.val64 = 0;
        ppgtt_set_shadow_root_entry(mm, &se, i);

        trace_gpt_change(vgpu->id, "destroy root pointer",
                NULL, se.type, se.val64, i);
    }
    mm->shadowed = false;
}
/**
 * intel_vgpu_destroy_mm - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy a mm object for vGPU
 *
 */
void intel_vgpu_destroy_mm(struct kref *mm_ref)
{
    struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
    struct intel_vgpu *vgpu = mm->vgpu;
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_gtt *gtt = &gvt->gtt;

    if (!mm->initialized)
        goto out;

    list_del(&mm->list);
    list_del(&mm->lru_list);

    if (mm->has_shadow_page_table)
        invalidate_mm(mm);

    gtt->mm_free_page_table(mm);
out:
    kfree(mm);
}
static int shadow_mm(struct intel_vgpu_mm *mm)
{
    struct intel_vgpu *vgpu = mm->vgpu;
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_gtt *gtt = &gvt->gtt;
    struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
    struct intel_vgpu_ppgtt_spt *spt;
    struct intel_gvt_gtt_entry ge, se;
    int i;
    int ret;

    if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
        return 0;

    mm->shadowed = true;

    for (i = 0; i < mm->page_table_entry_cnt; i++) {
        ppgtt_get_guest_root_entry(mm, &ge, i);
        if (!ops->test_present(&ge))
            continue;

        trace_gpt_change(vgpu->id, __func__, NULL,
                ge.type, ge.val64, i);

        spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
        if (IS_ERR(spt)) {
            gvt_vgpu_err("fail to populate guest root pointer\n");
            ret = PTR_ERR(spt);
            goto fail;
        }
        ppgtt_generate_shadow_entry(&se, spt, &ge);
        ppgtt_set_shadow_root_entry(mm, &se, i);

        trace_gpt_change(vgpu->id, "populate root pointer",
                NULL, se.type, se.val64, i);
    }
    return 0;
fail:
    invalidate_mm(mm);
    return ret;
}
/**
 * intel_vgpu_create_mm - create a mm object for a vGPU
 * @vgpu: a vGPU
 * @mm_type: mm object type, should be PPGTT or GGTT
 * @virtual_page_table: page table root pointers. Could be NULL if user wants
 *	to populate shadow later.
 * @page_table_level: describe the page table level of the mm object
 * @pde_base_index: pde root pointer base in GGTT MMIO.
 *
 * This function is used to create a mm object for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code in pointer if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
        int mm_type, void *virtual_page_table, int page_table_level,
        u32 pde_base_index)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_gtt *gtt = &gvt->gtt;
    struct intel_vgpu_mm *mm;
    int ret;

    mm = kzalloc(sizeof(*mm), GFP_KERNEL);
    if (!mm) {
        ret = -ENOMEM;
        goto fail;
    }

    mm->type = mm_type;

    if (page_table_level == 1)
        mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
    else if (page_table_level == 3)
        mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
    else if (page_table_level == 4)
        mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
    else {
        WARN_ON(1);
        goto fail;
    }

    mm->page_table_level = page_table_level;
    mm->pde_base_index = pde_base_index;

    mm->vgpu = vgpu;
    mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);

    kref_init(&mm->ref);
    atomic_set(&mm->pincount, 0);
    INIT_LIST_HEAD(&mm->list);
    INIT_LIST_HEAD(&mm->lru_list);
    list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);

    ret = gtt->mm_alloc_page_table(mm);
    if (ret) {
        gvt_vgpu_err("fail to allocate page table for mm\n");
        goto fail;
    }

    mm->initialized = true;

    if (virtual_page_table)
        memcpy(mm->virtual_page_table, virtual_page_table,
                mm->page_table_entry_size);

    if (mm->has_shadow_page_table) {
        ret = shadow_mm(mm);
        if (ret)
            goto fail;
        list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
    }
    return mm;
fail:
    gvt_vgpu_err("fail to create mm\n");
    if (mm)
        intel_gvt_mm_unreference(mm);
    return ERR_PTR(ret);
}
/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when user doesn't want to use a vGPU mm object
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
    if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
        return;

    atomic_dec(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
    int ret;

    if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
        return 0;

    if (!mm->shadowed) {
        ret = shadow_mm(mm);
        if (ret)
            return ret;
    }

    atomic_inc(&mm->pincount);
    list_del_init(&mm->lru_list);
    list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
    return 0;
}

static int reclaim_one_mm(struct intel_gvt *gvt)
{
    struct intel_vgpu_mm *mm;
    struct list_head *pos, *n;

    list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
        mm = container_of(pos, struct intel_vgpu_mm, lru_list);

        if (mm->type != INTEL_GVT_MM_PPGTT)
            continue;
        if (atomic_read(&mm->pincount))
            continue;

        list_del_init(&mm->lru_list);
        invalidate_mm(mm);
        return 1;
    }
    return 0;
}
/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
        struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
    struct intel_vgpu *vgpu = mm->vgpu;
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    struct intel_vgpu_ppgtt_spt *s;

    if (WARN_ON(!mm->has_shadow_page_table))
        return -EINVAL;

    s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
    if (!s)
        return -ENXIO;

    if (!guest)
        ppgtt_get_shadow_entry(s, e, index);
    else
        ppgtt_get_guest_entry(s, e, index);

    return 0;
}
/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a specific
 * graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
    struct intel_vgpu *vgpu = mm->vgpu;
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
    struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
    unsigned long gpa = INTEL_GVT_INVALID_ADDR;
    unsigned long gma_index[4];
    struct intel_gvt_gtt_entry e;
    unsigned long index = 0;
    int i, ret;

    if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
        return INTEL_GVT_INVALID_ADDR;

    if (mm->type == INTEL_GVT_MM_GGTT) {
        if (!vgpu_gmadr_is_valid(vgpu, gma))
            goto err;

        ret = ggtt_get_guest_entry(mm, &e,
            gma_ops->gma_to_ggtt_pte_index(gma));
        if (ret)
            goto err;

        gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
            + (gma & ~I915_GTT_PAGE_MASK);

        trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
        return gpa;
    }

    switch (mm->page_table_level) {
    case 4:
        ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
        if (ret)
            goto err;
        gma_index[0] = gma_ops->gma_to_pml4_index(gma);
        gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
        gma_index[2] = gma_ops->gma_to_pde_index(gma);
        gma_index[3] = gma_ops->gma_to_pte_index(gma);
        index = 4;
        break;
    case 3:
        ret = ppgtt_get_shadow_root_entry(mm, &e,
                gma_ops->gma_to_l3_pdp_index(gma));
        if (ret)
            goto err;
        gma_index[0] = gma_ops->gma_to_pde_index(gma);
        gma_index[1] = gma_ops->gma_to_pte_index(gma);
        index = 2;
        break;
    case 2:
        ret = ppgtt_get_shadow_root_entry(mm, &e,
                gma_ops->gma_to_pde_index(gma));
        if (ret)
            goto err;
        gma_index[0] = gma_ops->gma_to_pte_index(gma);
        index = 1;
        break;
    default:
        WARN_ON(1);
        goto err;
    }

    /* walk into the shadow page table and get gpa from guest entry */
    for (i = 0; i < index; i++) {
        ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
            (i == index - 1));
        if (ret)
            goto err;

        if (!pte_ops->test_present(&e)) {
            gvt_dbg_core("GMA 0x%lx is not present\n", gma);
            goto err;
        }
    }

    gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
        + (gma & ~I915_GTT_PAGE_MASK);

    trace_gma_translate(vgpu->id, "ppgtt", 0,
            mm->page_table_level, gma, gpa);
    return gpa;
err:
    gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
    return INTEL_GVT_INVALID_ADDR;
}
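
/*
 * Worked example for the PPGTT branch above: with a 4-level mm and a
 * hypothetical gma == 0x7f1234567000 the walk uses
 *
 *    gma_index[0] = pml4 index, gma_index[1] = l4 pdp index,
 *    gma_index[2] = pde index,  gma_index[3] = pte index;
 *
 * the first three levels are looked up in shadow pages and the last level
 * in the guest page (the "i == index - 1" argument), and the returned gpa
 * is the pfn of that final entry shifted up by I915_GTT_PAGE_SHIFT plus the
 * low 12 bits of the gma.
 */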
static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
    unsigned int off, void *p_data, unsigned int bytes)
{
    struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
    const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
    unsigned long index = off >> info->gtt_entry_size_shift;
    struct intel_gvt_gtt_entry e;

    if (bytes != 4 && bytes != 8)
        return -EINVAL;

    ggtt_get_guest_entry(ggtt_mm, &e, index);
    memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
            bytes);
    return 0;
}

/**
 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
    void *p_data, unsigned int bytes)
{
    const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
    int ret;

    if (bytes != 4 && bytes != 8)
        return -EINVAL;

    off -= info->gtt_start_offset;
    ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
    return ret;
}
static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
    void *p_data, unsigned int bytes)
{
    struct intel_gvt *gvt = vgpu->gvt;
    const struct intel_gvt_device_info *info = &gvt->device_info;
    struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
    struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
    unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
    unsigned long gma, gfn;
    struct intel_gvt_gtt_entry e, m;
    int ret;

    if (bytes != 4 && bytes != 8)
        return -EINVAL;

    gma = g_gtt_index << I915_GTT_PAGE_SHIFT;

    /* the VM may configure the whole GM space when ballooning is used */
    if (!vgpu_gmadr_is_valid(vgpu, gma))
        return 0;

    ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

    memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
            bytes);

    if (ops->test_present(&e)) {
        gfn = ops->get_pfn(&e);

        /* one PTE update may be issued in multiple writes and the
         * first write may not construct a valid gfn
         */
        if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
            ops->set_pfn(&m, gvt->gtt.scratch_mfn);
            goto out;
        }

        ret = gtt_entry_p2m(vgpu, &e, &m);
        if (ret) {
            gvt_vgpu_err("fail to translate guest gtt entry\n");
            /* The guest driver may read/write the entry while it is
             * only partially updated; in that situation p2m fails,
             * so point the shadow entry at a scratch page.
             */
            ops->set_pfn(&m, gvt->gtt.scratch_mfn);
        }
    } else {
        m = e;
        ops->set_pfn(&m, gvt->gtt.scratch_mfn);
    }

out:
    ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
    gtt_invalidate(gvt->dev_priv);
    ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
    return 0;
}

/**
 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
    void *p_data, unsigned int bytes)
{
    const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
    int ret;

    if (bytes != 4 && bytes != 8)
        return -EINVAL;

    off -= info->gtt_start_offset;
    ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
    return ret;
}
int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
        void *p_data, unsigned int bytes)
{
    struct intel_gvt *gvt = vgpu->gvt;
    int ret = 0;

    if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
        struct intel_vgpu_page_track *t;

        mutex_lock(&gvt->lock);

        t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
        if (t) {
            if (unlikely(vgpu->failsafe)) {
                /* remove write protection to prevent future traps */
                intel_vgpu_clean_page_track(vgpu, t);
            } else {
                ret = t->handler(t, pa, p_data, bytes);
                if (ret) {
                    gvt_err("guest page write error %d, "
                        "gfn 0x%lx, pa 0x%llx, "
                        "var 0x%x, len %d\n",
                        ret, t->gfn, pa,
                        *(u32 *)p_data, bytes);
                }
            }
        }
        mutex_unlock(&gvt->lock);
    }
    return ret;
}
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
        intel_gvt_gtt_type_t type)
{
    struct intel_vgpu_gtt *gtt = &vgpu->gtt;
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    int page_entry_num = I915_GTT_PAGE_SIZE >>
                vgpu->gvt->device_info.gtt_entry_size_shift;
    void *scratch_pt;
    int i;
    struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
    dma_addr_t daddr;

    if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
        return -EINVAL;

    scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
    if (!scratch_pt) {
        gvt_vgpu_err("fail to allocate scratch page\n");
        return -ENOMEM;
    }

    daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
            4096, PCI_DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, daddr)) {
        gvt_vgpu_err("fail to dmamap scratch_pt\n");
        __free_page(virt_to_page(scratch_pt));
        return -ENOMEM;
    }
    gtt->scratch_pt[type].page_mfn =
        (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
    gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
    gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
            vgpu->id, type, gtt->scratch_pt[type].page_mfn);

    /* Build the tree by filling the scratch pt with entries that
     * point to the next level scratch pt or scratch page. The
     * scratch_pt[type] indicates the scratch pt/scratch page used by
     * entries in a shadow page table of that type.
     * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
     * GTT_TYPE_PPGTT_PDE_PT level pt; that means this scratch_pt itself
     * is a GTT_TYPE_PPGTT_PTE_PT, fully filled with the scratch page mfn.
     */
    if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
        struct intel_gvt_gtt_entry se;

        memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
        se.type = get_entry_type(type - 1);
        ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

        /* The entry parameters like present/writeable/cache type
         * are set to the same as i915's scratch page tree.
         */
        se.val64 |= _PAGE_PRESENT | _PAGE_RW;
        if (type == GTT_TYPE_PPGTT_PDE_PT)
            se.val64 |= PPAT_CACHED;

        for (i = 0; i < page_entry_num; i++)
            ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
    }

    return 0;
}
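
/*
 * Shape of the per-vGPU scratch tree built by the loop in
 * create_scratch_page_tree() below (a restatement of the code above):
 * scratch_pt[type] is what an entry living in a page table of that type
 * points to when it is scratched.
 *
 *    scratch_pt[PML4_PT] : PDP-level table, every entry -> scratch_pt[PDP_PT]
 *    scratch_pt[PDP_PT]  : PDE-level table, every entry -> scratch_pt[PDE_PT]
 *    scratch_pt[PDE_PT]  : PTE-level table, every entry -> scratch_pt[PTE_PT]
 *    scratch_pt[PTE_PT]  : zeroed data page
 *
 * so a not-present guest entry can always be shadowed by a pointer into
 * this chain instead of a real guest page.
 */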
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
    int i;
    struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
    dma_addr_t daddr;

    for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
        if (vgpu->gtt.scratch_pt[i].page != NULL) {
            daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
                    I915_GTT_PAGE_SHIFT);
            dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
            __free_page(vgpu->gtt.scratch_pt[i].page);
            vgpu->gtt.scratch_pt[i].page = NULL;
            vgpu->gtt.scratch_pt[i].page_mfn = 0;
        }
    }

    return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
    int i, ret;

    for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
        ret = alloc_scratch_pages(vgpu, i);
        if (ret)
            goto err;
    }

    return 0;

err:
    release_scratch_page_tree(vgpu);
    return ret;
}
/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
    struct intel_vgpu_gtt *gtt = &vgpu->gtt;
    struct intel_vgpu_mm *ggtt_mm;

    hash_init(gtt->tracked_guest_page_hash_table);
    hash_init(gtt->shadow_page_hash_table);

    INIT_LIST_HEAD(&gtt->mm_list_head);
    INIT_LIST_HEAD(&gtt->oos_page_list_head);
    INIT_LIST_HEAD(&gtt->post_shadow_list_head);

    intel_vgpu_reset_ggtt(vgpu);

    ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
            NULL, 1, 0);
    if (IS_ERR(ggtt_mm)) {
        gvt_vgpu_err("fail to create mm for ggtt.\n");
        return PTR_ERR(ggtt_mm);
    }

    gtt->ggtt_mm = ggtt_mm;

    return create_scratch_page_tree(vgpu);
}
static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
{
    struct list_head *pos, *n;
    struct intel_vgpu_mm *mm;

    list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
        mm = container_of(pos, struct intel_vgpu_mm, list);
        if (mm->type == type) {
            vgpu->gvt->gtt.mm_free_page_table(mm);
            list_del(&mm->list);
            list_del(&mm->lru_list);
            kfree(mm);
        }
    }
}
/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
    ppgtt_free_all_shadow_page(vgpu);
    release_scratch_page_tree(vgpu);

    intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
    intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
}
static void clean_spt_oos(struct intel_gvt *gvt)
{
    struct intel_gvt_gtt *gtt = &gvt->gtt;
    struct list_head *pos, *n;
    struct intel_vgpu_oos_page *oos_page;

    WARN(!list_empty(&gtt->oos_page_use_list_head),
        "someone is still using oos page\n");

    list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
        oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
        list_del(&oos_page->list);
        kfree(oos_page);
    }
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
    struct intel_gvt_gtt *gtt = &gvt->gtt;
    struct intel_vgpu_oos_page *oos_page;
    int i;
    int ret;

    INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
    INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

    for (i = 0; i < preallocated_oos_pages; i++) {
        oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
        if (!oos_page) {
            ret = -ENOMEM;
            goto fail;
        }

        INIT_LIST_HEAD(&oos_page->list);
        INIT_LIST_HEAD(&oos_page->vm_list);
        oos_page->id = i;
        list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
    }

    gvt_dbg_mm("%d oos pages preallocated\n", i);

    return 0;
fail:
    clean_spt_oos(gvt);
    return ret;
}
/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 * @root_entry: PPGTT page table root pointers
 *
 * This function is used to find a PPGTT mm object from mm object pool
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
        int page_table_level, void *root_entry)
{
    struct list_head *pos;
    struct intel_vgpu_mm *mm;
    u64 *src, *dst;

    list_for_each(pos, &vgpu->gtt.mm_list_head) {
        mm = container_of(pos, struct intel_vgpu_mm, list);
        if (mm->type != INTEL_GVT_MM_PPGTT)
            continue;

        if (mm->page_table_level != page_table_level)
            continue;

        src = root_entry;
        dst = mm->virtual_page_table;

        if (page_table_level == 3) {
            if (src[0] == dst[0]
                    && src[1] == dst[1]
                    && src[2] == dst[2]
                    && src[3] == dst[3])
                return mm;
        } else {
            if (src[0] == dst[0])
                return mm;
        }
    }
    return NULL;
}
/**
 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to create a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
        int page_table_level)
{
    u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
    struct intel_vgpu_mm *mm;

    if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
        return -EINVAL;

    mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
    if (mm) {
        intel_gvt_mm_reference(mm);
    } else {
        mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
                pdp, page_table_level, 0);
        if (IS_ERR(mm)) {
            gvt_vgpu_err("fail to create mm\n");
            return PTR_ERR(mm);
        }
    }
    return 0;
}

/**
 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
        int page_table_level)
{
    u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
    struct intel_vgpu_mm *mm;

    if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
        return -EINVAL;

    mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
    if (!mm) {
        gvt_vgpu_err("fail to find ppgtt instance.\n");
        return -EINVAL;
    }
    intel_gvt_mm_unreference(mm);
    return 0;
}
/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
    int ret;
    void *page;
    struct device *dev = &gvt->dev_priv->drm.pdev->dev;
    dma_addr_t daddr;

    gvt_dbg_core("init gtt\n");

    if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
        || IS_KABYLAKE(gvt->dev_priv)) {
        gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
        gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
        gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
        gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
    } else {
        return -ENODEV;
    }

    page = (void *)get_zeroed_page(GFP_KERNEL);
    if (!page) {
        gvt_err("fail to allocate scratch ggtt page\n");
        return -ENOMEM;
    }

    daddr = dma_map_page(dev, virt_to_page(page), 0,
            4096, PCI_DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, daddr)) {
        gvt_err("fail to dmamap scratch ggtt page\n");
        __free_page(virt_to_page(page));
        return -ENOMEM;
    }
    gvt->gtt.scratch_page = virt_to_page(page);
    gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

    if (enable_out_of_sync) {
        ret = setup_spt_oos(gvt);
        if (ret) {
            gvt_err("fail to initialize SPT oos\n");
            dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
            __free_page(gvt->gtt.scratch_page);
            return ret;
        }
    }
    INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
    return 0;
}
/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
    struct device *dev = &gvt->dev_priv->drm.pdev->dev;
    dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
                    I915_GTT_PAGE_SHIFT);

    dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

    __free_page(gvt->gtt.scratch_page);

    if (enable_out_of_sync)
        clean_spt_oos(gvt);
}
/**
 * intel_vgpu_reset_ggtt - reset the GGTT entry
 * @vgpu: a vGPU
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct drm_i915_private *dev_priv = gvt->dev_priv;
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    u32 index;
    u32 offset;
    u32 num_entries;
    struct intel_gvt_gtt_entry e;

    memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
    e.type = GTT_TYPE_GGTT_PTE;
    ops->set_pfn(&e, gvt->gtt.scratch_mfn);
    e.val64 |= _PAGE_PRESENT;

    index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
    num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
    for (offset = 0; offset < num_entries; offset++)
        ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);

    index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
    num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
    for (offset = 0; offset < num_entries; offset++)
        ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);

    gtt_invalidate(dev_priv);
}
/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and scratch pages.
 *
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
    ppgtt_free_all_shadow_page(vgpu);

    /* Shadow pages are only created when there is no page
     * table tracking data, so remove page tracking data after
     * removing the shadow pages.
     */
    intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);

    intel_vgpu_reset_ggtt(vgpu);
}