/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
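
/*
 * enable_out_of_sync gates the out-of-sync optimization used below: a guest
 * PTE page that keeps being rewritten can have its write protection dropped
 * and be resynchronized from an oos_page snapshot just before workload
 * submission (see can_do_out_of_sync() and intel_vgpu_sync_oos_pages()).
 * preallocated_oos_pages is the size of that snapshot pool.
 */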
/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
				addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
		 "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		 "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}
#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)
/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - type of the next level page table
 * - type of entry inside this level of page table
 * - type of entry with the PSE bit set
 *
 * If the given type doesn't carry a particular piece of information
 * (e.g. an L4 root entry has no PSE bit, and a PTE page table has no
 * next-level page table), GTT_TYPE_INVALID is returned. This is useful
 * when traversing a page table.
 */
struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	/* We take IPS bit as 'PSE' for PTE level. */
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};
static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}
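
/*
 * Example lookups against gtt_type_table: for a PDE page table
 * (GTT_TYPE_PPGTT_PDE_PT) the entry type is GTT_TYPE_PPGTT_PDE_ENTRY, the
 * next level is GTT_TYPE_PPGTT_PTE_PT and the PSE form of its entry is
 * GTT_TYPE_PPGTT_PTE_2M_ENTRY; a PTE page table has no next level, so
 * get_next_pt_type(GTT_TYPE_PPGTT_PTE_PT) returns GTT_TYPE_INVALID.
 */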
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}
static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}
#define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
#define ADDR_64K_MASK	GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)

#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */

#define GTT_64K_PTE_STRIDE 16
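
/*
 * GTT_64K_PTE_STRIDE is 64K / 4K: a 64K page spans 16 consecutive 4K PTE
 * slots, so 64K-mode page tables only use every 16th entry (PTE#0, PTE#16,
 * ...), and bit 52 of a shadow PTE marks entries that were produced by
 * splitting such a 64K entry.
 */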
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
		e->val64 &= ~ADDR_64K_MASK;
		pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
	}

	e->val64 |= (pfn << PAGE_SHIFT);
}
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & _PAGE_PSE);
}

static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
{
	if (gen8_gtt_test_pse(e)) {
		switch (e->type) {
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			e->val64 &= ~_PAGE_PSE;
			e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
			break;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
			e->val64 &= ~_PAGE_PSE;
			break;
		default:
			WARN_ON(1);
		}
	}
}

static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return false;

	return !!(e->val64 & GEN8_PDE_IPS_64K);
}

static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return;

	e->val64 &= ~GEN8_PDE_IPS_64K;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without the present bit,
	 * and it also works, so we need to treat root pointer entries
	 * specially.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & _PAGE_PRESENT);
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~_PAGE_PRESENT;
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= _PAGE_PRESENT;
}

static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
}

static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
}

static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
}
/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
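
/*
 * With the shifts above, a gen8 graphics memory address decodes as:
 * bits [47:39] PML4 index, [38:30] PDP index, [29:21] PDE index,
 * [20:12] PTE index and [11:0] page offset; the legacy 3-level layout
 * only uses bits [31:30] to select one of four PDPs.
 */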
static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.clear_pse = gen8_gtt_clear_pse,
	.clear_ips = gen8_gtt_clear_ips,
	.test_ips = gen8_gtt_test_ips,
	.clear_64k_splited = gen8_gtt_clear_64k_splited,
	.set_64k_splited = gen8_gtt_set_64k_splited,
	.test_64k_splited = gen8_gtt_test_64k_splited,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
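
/*
 * All entry manipulation and GMA decoding below goes through these two ops
 * tables, so the rest of the code stays independent of the gen8 PTE layout.
 */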
/* Update entry type per pse and ips bit. */
static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
	struct intel_gvt_gtt_entry *entry, bool ips)
{
	switch (entry->type) {
	case GTT_TYPE_PPGTT_PDE_ENTRY:
	case GTT_TYPE_PPGTT_PDP_ENTRY:
		if (pte_ops->test_pse(entry))
			entry->type = get_pse_type(entry->type);
		break;
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		if (ips)
			entry->type = get_pse_type(entry->type);
		break;
	default:
		GEM_BUG_ON(!gtt_type_is_entry(entry->type));
	}

	GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
}
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
	update_entry_type_for_real(pte_ops, entry, false);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, true);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}

static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}
/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	update_entry_type_for_real(ops, e, guest ?
				   spt->guest_page.pde_ips : false);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)
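
/*
 * The guest_* accessors read/write the guest's own page table through its
 * GPA (guest_page.gfn), while the shadow_* accessors work on the host-side
 * shadow page through its kernel mapping (shadow_page.vaddr).
 */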
static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;

	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);

	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
		       PCI_DMA_BIDIRECTIONAL);

	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);

	if (spt->guest_page.gfn) {
		if (spt->guest_page.oos_page)
			detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

		intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
	}

	list_del_init(&spt->post_shadow_list);
	free_spt(spt);
}

static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_ppgtt_spt *spt;
	struct radix_tree_iter iter;
	void **slot;

	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
		spt = radix_tree_deref_slot(slot);
		ppgtt_free_spt(spt);
	}
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(
		struct intel_vgpu_page_track *page_track,
		u64 gpa, void *data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
	if (ret)
		return ret;
	return 0;
}

/* Find a spt by guest gfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *track;

	track = intel_vgpu_find_page_track(vgpu, gfn);
	if (track && track->handler == ppgtt_write_protection_handler)
		return track->priv_data;

	return NULL;
}

/* Find the spt by shadow page mfn. */
static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
		struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	dma_addr_t daddr;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * Init shadow_page.
	 */
	spt->shadow_page.type = type;
	daddr = dma_map_page(kdev, spt->shadow_page.page,
			     0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		ret = -EINVAL;
		goto err_free_spt;
	}
	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;

	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
	if (ret)
		goto err_unmap_dma;

	return spt;

err_unmap_dma:
	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
	free_spt(spt);
	return ERR_PTR(ret);
}

/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
		struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
		unsigned long gfn, bool guest_pde_ips)
{
	struct intel_vgpu_ppgtt_spt *spt;
	int ret;

	spt = ppgtt_alloc_spt(vgpu, type);
	if (IS_ERR(spt))
		return spt;

	/*
	 * Init guest_page.
	 */
	ret = intel_vgpu_register_page_track(vgpu, gfn,
			ppgtt_write_protection_handler, spt);
	if (ret) {
		ppgtt_free_spt(spt);
		return ERR_PTR(ret);
	}

	spt->guest_page.type = type;
	spt->guest_page.gfn = gfn;
	spt->guest_page.pde_ips = guest_pde_ips;

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);

	return spt;
}
#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
		if (!ppgtt_get_shadow_entry(spt, e, i))
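
/*
 * The iterators above step by GTT_64K_PTE_STRIDE when the owning PDE has IPS
 * set, since only every 16th slot of a 64K-mode page table carries a valid
 * entry.
 */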
static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
	atomic_inc(&spt->refcount);
}

static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
	return atomic_dec_return(&spt->refcount);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_spt(s);
}

static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;
	int type;

	pfn = ops->get_pfn(entry);
	type = spt->shadow_page.type;

	/* Uninitialized spte or unshadowed spte. */
	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
		return;

	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;

	trace_spt_change(spt->vgpu->id, "die", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	if (ppgtt_put_spt(spt) > 0)
		return 0;

	for_each_present_shadow_entry(spt, &e, index) {
		switch (e.type) {
		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
			gvt_vdbg_mm("invalidate 4K entry\n");
			ppgtt_invalidate_pte(spt, &e);
			break;
		case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
			/* We don't setup 64K shadow entry so far. */
			WARN(1, "suspicious 64K gtt entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			gvt_vdbg_mm("invalidate 2M entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			WARN(1, "GVT doesn't support 1GB page\n");
			continue;
		case GTT_TYPE_PPGTT_PML4_ENTRY:
		case GTT_TYPE_PPGTT_PDP_ENTRY:
		case GTT_TYPE_PPGTT_PDE_ENTRY:
			gvt_vdbg_mm("invalidate PMUL4/PDP/PDE entry\n");
			ret = ppgtt_invalidate_spt_by_shadow_entry(
					spt->vgpu, &e);
			if (ret)
				goto fail;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}

	trace_spt_change(spt->vgpu->id, "release", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_spt(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
			GAMW_ECO_ENABLE_64K_IPS_FIELD;

		return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		/* 64K paging only controlled by IPS bit in PTE now. */
		return true;
	} else
		return false;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	bool ips = false;
	int ret;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

	if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);

	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
	if (spt) {
		ppgtt_get_spt(spt);

		if (ips != spt->guest_page.pde_ips) {
			spt->guest_page.pde_ips = ips;

			gvt_dbg_mm("reshadow PDE since ips changed\n");
			clear_page(spt->shadow_page.vaddr);
			ret = ppgtt_populate_spt(spt);
			if (ret) {
				ppgtt_put_spt(spt);
				goto err;
			}
		}
	} else {
		int type = get_next_pt_type(we->type);

		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
		if (IS_ERR(spt)) {
			ret = PTR_ERR(spt);
			goto err;
		}

		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
		if (ret)
			goto err_free_spt;

		ret = ppgtt_populate_spt(spt);
		if (ret)
			goto err_free_spt;

		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
				 spt->shadow_page.type);
	}
	return spt;

err_free_spt:
	ppgtt_free_spt(spt);
err:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ERR_PTR(ret);
}
static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	/* We always split 64KB pages, so clear IPS in the shadow PDE. */
	if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ops->clear_ips(se);

	ops->set_pfn(se, s->shadow_page.mfn);
}
/*
 * Return 1 if 2MB huge gtt shadowing is possible, 0 if it is not,
 * or a negative error code on failure.
 */
static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
	struct intel_gvt_gtt_entry *entry)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
		return 0;

	pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
	if (pfn == INTEL_GVT_INVALID_ADDR)
		return -EINVAL;

	return PageTransHuge(pfn_to_page(pfn));
}
static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *sub_spt;
	struct intel_gvt_gtt_entry sub_se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	unsigned long sub_index;
	int ret;

	gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);

	start_gfn = ops->get_pfn(se);

	sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
	if (IS_ERR(sub_spt))
		return PTR_ERR(sub_spt);

	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
		if (ret) {
			ppgtt_invalidate_spt(spt);
			return ret;
		}
		sub_se.val64 = se->val64;

		/* Copy the PAT field from PDE. */
		sub_se.val64 &= ~_PAGE_PAT;
		sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;

		ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
	}

	/* Clear dirty field. */
	se->val64 &= ~_PAGE_DIRTY;

	ops->clear_pse(se);
	ops->clear_ips(se);
	ops->set_pfn(se, sub_spt->shadow_page.mfn);
	ppgtt_set_shadow_entry(spt, se, index);
	return 0;
}
static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = *se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	int i, ret;

	gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);

	GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);

	start_gfn = ops->get_pfn(se);

	entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
	ops->set_64k_splited(&entry);

	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
				start_gfn + i, PAGE_SIZE, &dma_addr);
		if (ret)
			return ret;

		ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(spt, &entry, index + i);
	}
	return 0;
}

static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se = *ge;
	unsigned long gfn, page_size = PAGE_SIZE;
	dma_addr_t dma_addr;
	int ret;

	if (!pte_ops->test_present(ge))
		return 0;

	gfn = pte_ops->get_pfn(ge);

	switch (ge->type) {
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		gvt_vdbg_mm("shadow 4K gtt entry\n");
		break;
	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
		gvt_vdbg_mm("shadow 64K gtt entry\n");
		/*
		 * The layout of 64K page is special, the page size is
		 * controlled by the upper PDE. To keep it simple, we always
		 * split the 64K page into smaller 4K pages in the shadow PT.
		 */
		return split_64KB_gtt_entry(vgpu, spt, index, &se);
	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
		gvt_vdbg_mm("shadow 2M gtt entry\n");
		ret = is_2MB_gtt_possible(vgpu, ge);
		if (ret == 0)
			return split_2MB_gtt_entry(vgpu, spt, index, &se);
		else if (ret < 0)
			return ret;
		page_size = I915_GTT_PAGE_SIZE_2M;
		break;
	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
		return -EINVAL;
	default:
		GEM_BUG_ON(1);
	}

	/* direct shadow */
	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
						      &dma_addr);
	if (ret)
		return -ENXIO;

	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
	ppgtt_set_shadow_entry(spt, &se, index);
	return 0;
}
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long gfn, i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	for_each_present_guest_entry(spt, &ge, i) {
		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
			s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
			if (IS_ERR(s)) {
				ret = PTR_ERR(s);
				goto fail;
			}
			ppgtt_get_shadow_entry(spt, &se, i);
			ppgtt_generate_shadow_entry(&se, s, &ge);
			ppgtt_set_shadow_entry(spt, &se, i);
		} else {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
				ppgtt_set_shadow_entry(spt, &se, i);
				continue;
			}

			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}
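
/*
 * ppgtt_populate_spt() walks one guest page table: directory-style entries
 * recursively get their own shadow page tables, leaf entries are translated
 * and pinned via intel_gvt_hypervisor_dma_map_guest_page(), and entries
 * whose gfn is not (yet) valid are pointed at the scratch page instead of
 * failing the whole walk.
 */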
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
			       spt->shadow_page.type, se->val64, index);

	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
		    se->type, index, se->val64);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) ==
	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_spt(s);
		if (ret)
			goto fail;
	} else {
		/* We don't setup 64K shadow entry so far. */
		WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
		     "suspicious 64K entry\n");
		ppgtt_invalidate_pte(spt, se);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, se->val64, se->type);
	return ret;
}
static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
			       we->val64, index);

	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
		    we->type, index, we->val64);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		spt, we->val64, we->type);
	return ret;
}
static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
	struct intel_gvt_gtt_entry old, new;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			 spt, spt->guest_page.type);

	old.type = new.type = get_entry_type(spt->guest_page.type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				spt, spt->guest_page.type,
				new.val64, index);

		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
	}

	spt->guest_page.write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}
static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			 spt, spt->guest_page.type);

	spt->guest_page.write_cnt = 0;
	spt->guest_page.oos_page = NULL;
	oos_page->spt = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->spt = spt;
	spt->guest_page.oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
			 spt, spt->guest_page.type);
	return 0;
}
static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
	if (ret)
		return ret;

	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_del_init(&oos_page->vm_list);
	return sync_oos_page(spt->vgpu, oos_page);
}
static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
			struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
		ret = detach_oos_page(spt->vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
			struct intel_vgpu_oos_page, list);
	return attach_oos_page(oos_page, spt);
}
static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
	return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
}
/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-synced shadow for vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to sync all the out-of-synced shadow for vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * The heart of PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry old_se;
	int new_present;
	int i, ret;

	new_present = ops->test_present(we);

	/*
	 * Add the new entry first and then remove the old one, so that the
	 * ppgtt table stays valid during the window between adding and
	 * removal.
	 */
	ppgtt_get_shadow_entry(spt, &old_se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(spt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		/* For 64KB splited entries, we need to clear them all. */
		if (ops->test_64k_splited(&old_se) &&
		    !(index % GTT_64K_PTE_STRIDE)) {
			gvt_vdbg_mm("remove splited 64K shadow entries\n");
			for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
				ops->clear_64k_splited(&old_se);
				ops->set_pfn(&old_se,
					vgpu->gtt.scratch_pt[type].page_mfn);
				ppgtt_set_shadow_entry(spt, &old_se, index + i);
			}
		} else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
			   old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
			ops->clear_pse(&old_se);
			ops->set_pfn(&old_se,
				     vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &old_se, index);
		} else {
			ops->set_pfn(&old_se,
				     vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &old_se, index);
		}
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
			spt, we->val64, we->type);
	return ret;
}
static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(spt->guest_page.type)
		&& spt->guest_page.write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
			&spt->vgpu->gtt.post_shadow_list_head);
}
/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(spt,
							&ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}
static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we, se;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	/*
	 * For a page table that holds 64K gtt entries, only PTE#0, PTE#16,
	 * PTE#32, ... PTE#496 are used. Writes to the unused PTEs should be
	 * ignored.
	 */
	if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
	    (index % GTT_64K_PTE_STRIDE)) {
		gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
			    index);
		return 0;
	}

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			int type = spt->shadow_page.type;

			ppgtt_get_shadow_entry(spt, &se, index);
			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
			if (ret)
				return ret;
			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &se, index);
		}
		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	spt->guest_page.write_cnt++;

	if (spt->guest_page.oos_page)
		ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
				false, 0, vgpu);

	if (can_do_out_of_sync(spt)) {
		if (!spt->guest_page.oos_page)
			ppgtt_allocate_oos_page(spt);

		ret = ppgtt_set_guest_page_oos(spt);
		if (ret < 0)
			return ret;
	}
	return 0;
}
*mm
)
1745 struct intel_vgpu
*vgpu
= mm
->vgpu
;
1746 struct intel_gvt
*gvt
= vgpu
->gvt
;
1747 struct intel_gvt_gtt
*gtt
= &gvt
->gtt
;
1748 struct intel_gvt_gtt_pte_ops
*ops
= gtt
->pte_ops
;
1749 struct intel_gvt_gtt_entry se
;
1752 if (!mm
->ppgtt_mm
.shadowed
)
1755 for (index
= 0; index
< ARRAY_SIZE(mm
->ppgtt_mm
.shadow_pdps
); index
++) {
1756 ppgtt_get_shadow_root_entry(mm
, &se
, index
);
1758 if (!ops
->test_present(&se
))
1761 ppgtt_invalidate_spt_by_shadow_entry(vgpu
, &se
);
1763 ppgtt_set_shadow_root_entry(mm
, &se
, index
);
1765 trace_spt_guest_change(vgpu
->id
, "destroy root pointer",
1766 NULL
, se
.type
, se
.val64
, index
);
1769 mm
->ppgtt_mm
.shadowed
= false;
static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int index, ret;

	if (mm->ppgtt_mm.shadowed)
		return 0;

	mm->ppgtt_mm.shadowed = true;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
		ppgtt_get_guest_root_entry(mm, &ge, index);

		if (!ops->test_present(&ge))
			continue;

		trace_spt_guest_change(vgpu->id, __func__, NULL,
				       ge.type, ge.val64, index);

		spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "populate root pointer",
				       NULL, se.type, se.val64, index);
	}

	return 0;
fail:
	invalidate_ppgtt_mm(mm);
	return ret;
}
static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return NULL;

	mm->vgpu = vgpu;
	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);

	return mm;
}

static void vgpu_free_mm(struct intel_vgpu_mm *mm)
{
	kfree(mm);
}
/**
 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps.
 *
 * This function is used to create a ppgtt mm object for a vGPU.
 *
 * Returns:
 * The created mm object on success, an ERR_PTR-encoded negative error code
 * if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_PPGTT;

	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
	mm->ppgtt_mm.root_entry_type = root_entry_type;

	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);

	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
	else
		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
		       sizeof(mm->ppgtt_mm.guest_pdps));

	ret = shadow_ppgtt_mm(mm);
	if (ret) {
		gvt_vgpu_err("failed to shadow ppgtt mm\n");
		vgpu_free_mm(mm);
		return ERR_PTR(ret);
	}

	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);

	return mm;
}
static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;
	unsigned long nr_entries;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_GGTT;

	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
	mm->ggtt_mm.virtual_ggtt =
		vzalloc(array_size(nr_entries,
				   vgpu->gvt->device_info.gtt_entry_size));
	if (!mm->ggtt_mm.virtual_ggtt) {
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}
	mm->ggtt_mm.last_partial_off = -1UL;

	return mm;
}
/**
 * _intel_vgpu_mm_release - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy a mm object for vGPU
 *
 */
void _intel_vgpu_mm_release(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);

	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
		gvt_err("vgpu mm pin count bug detected\n");

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		list_del(&mm->ppgtt_mm.list);
		list_del(&mm->ppgtt_mm.lru_list);
		invalidate_ppgtt_mm(mm);
	} else {
		vfree(mm->ggtt_mm.virtual_ggtt);
		mm->ggtt_mm.last_partial_off = -1UL;
	}

	vgpu_free_mm(mm);
}
/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when user doesn't want to use a vGPU mm object
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	atomic_dec_if_positive(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	atomic_inc(&mm->pincount);

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		ret = shadow_ppgtt_mm(mm);
		if (ret)
			return ret;

		list_move_tail(&mm->ppgtt_mm.lru_list,
			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
	}

	return 0;
}
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);

		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->ppgtt_mm.lru_list);
		invalidate_ppgtt_mm(mm);
		return 1;
	}
	return 0;
}
/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);

	return 0;
}
/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a specific
 * graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, levels = 0;
	int ret;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
		   mm->type != INTEL_GVT_MM_PPGTT);

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ggtt_get_guest_entry(mm, &e,
			gma_ops->gma_to_ggtt_pte_index(gma));

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
			+ (gma & ~I915_GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
	} else {
		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e, 0);

			gma_index[0] = gma_ops->gma_to_pml4_index(gma);
			gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
			gma_index[2] = gma_ops->gma_to_pde_index(gma);
			gma_index[3] = gma_ops->gma_to_pte_index(gma);
			levels = 4;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e,
					gma_ops->gma_to_l3_pdp_index(gma));

			gma_index[0] = gma_ops->gma_to_pde_index(gma);
			gma_index[1] = gma_ops->gma_to_pte_index(gma);
			levels = 2;
			break;
		default:
			GEM_BUG_ON(1);
		}

		/* walk the shadow page table and get gpa from guest entry */
		for (i = 0; i < levels; i++) {
			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
				(i == levels - 1));
			if (ret)
				goto err;

			if (!pte_ops->test_present(&e)) {
				gvt_dbg_core("GMA 0x%lx is not present\n", gma);
				goto err;
			}
		}

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
			(gma & ~I915_GTT_PAGE_MASK);
		trace_gma_translate(vgpu->id, "ppgtt", 0,
				    mm->ppgtt_mm.root_entry_type, gma, gpa);
	}

	return gpa;
err:
	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}
static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
			bytes);
	return 0;
}
/**
 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}
static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	pfn = pte_ops->get_pfn(entry);
	if (pfn != vgpu->gvt->gtt.scratch_mfn)
		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
						pfn << PAGE_SHIFT);
}
*vgpu
, unsigned int off
,
2156 void *p_data
, unsigned int bytes
)
2158 struct intel_gvt
*gvt
= vgpu
->gvt
;
2159 const struct intel_gvt_device_info
*info
= &gvt
->device_info
;
2160 struct intel_vgpu_mm
*ggtt_mm
= vgpu
->gtt
.ggtt_mm
;
2161 struct intel_gvt_gtt_pte_ops
*ops
= gvt
->gtt
.pte_ops
;
2162 unsigned long g_gtt_index
= off
>> info
->gtt_entry_size_shift
;
2163 unsigned long gma
, gfn
;
2164 struct intel_gvt_gtt_entry e
= {.val64
= 0, .type
= GTT_TYPE_GGTT_PTE
};
2165 struct intel_gvt_gtt_entry m
= {.val64
= 0, .type
= GTT_TYPE_GGTT_PTE
};
2166 dma_addr_t dma_addr
;
2169 if (bytes
!= 4 && bytes
!= 8)
2172 gma
= g_gtt_index
<< I915_GTT_PAGE_SHIFT
;
2174 /* the VM may configure the whole GM space when ballooning is used */
2175 if (!vgpu_gmadr_is_valid(vgpu
, gma
))
2178 ggtt_get_guest_entry(ggtt_mm
, &e
, g_gtt_index
);
2180 memcpy((void *)&e
.val64
+ (off
& (info
->gtt_entry_size
- 1)), p_data
,
2183 /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
2184 * write, we assume the two 4 bytes writes are consecutive.
2185 * Otherwise, we abort and report error
2187 if (bytes
< info
->gtt_entry_size
) {
2188 if (ggtt_mm
->ggtt_mm
.last_partial_off
== -1UL) {
2189 /* the first partial part*/
2190 ggtt_mm
->ggtt_mm
.last_partial_off
= off
;
2191 ggtt_mm
->ggtt_mm
.last_partial_data
= e
.val64
;
2193 } else if ((g_gtt_index
==
2194 (ggtt_mm
->ggtt_mm
.last_partial_off
>>
2195 info
->gtt_entry_size_shift
)) &&
2196 (off
!= ggtt_mm
->ggtt_mm
.last_partial_off
)) {
2197 /* the second partial part */
2199 int last_off
= ggtt_mm
->ggtt_mm
.last_partial_off
&
2200 (info
->gtt_entry_size
- 1);
2202 memcpy((void *)&e
.val64
+ last_off
,
2203 (void *)&ggtt_mm
->ggtt_mm
.last_partial_data
+
2206 ggtt_mm
->ggtt_mm
.last_partial_off
= -1UL;
2210 gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
2211 ggtt_mm
->ggtt_mm
.last_partial_off
, off
,
2212 bytes
, info
->gtt_entry_size
);
2214 /* set host ggtt entry to scratch page and clear
2215 * virtual ggtt entry as not present for last
2216 * partially write offset
2218 last_offset
= ggtt_mm
->ggtt_mm
.last_partial_off
&
2219 (~(info
->gtt_entry_size
- 1));
2221 ggtt_get_host_entry(ggtt_mm
, &m
, last_offset
);
2222 ggtt_invalidate_pte(vgpu
, &m
);
2223 ops
->set_pfn(&m
, gvt
->gtt
.scratch_mfn
);
2224 ops
->clear_present(&m
);
2225 ggtt_set_host_entry(ggtt_mm
, &m
, last_offset
);
2226 ggtt_invalidate(gvt
->dev_priv
);
2228 ggtt_get_guest_entry(ggtt_mm
, &e
, last_offset
);
2229 ops
->clear_present(&e
);
2230 ggtt_set_guest_entry(ggtt_mm
, &e
, last_offset
);
2232 ggtt_mm
->ggtt_mm
.last_partial_off
= off
;
2233 ggtt_mm
->ggtt_mm
.last_partial_data
= e
.val64
;
2239 if (ops
->test_present(&e
)) {
2240 gfn
= ops
->get_pfn(&e
);
2244 /* one PTE update may be issued in multiple writes and the
2245 * first write may not construct a valid gfn
2247 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu
, gfn
)) {
2248 ops
->set_pfn(&m
, gvt
->gtt
.scratch_mfn
);
2252 ret
= intel_gvt_hypervisor_dma_map_guest_page(vgpu
, gfn
,
2253 PAGE_SIZE
, &dma_addr
);
2255 gvt_vgpu_err("fail to populate guest ggtt entry\n");
2256 /* guest driver may read/write the entry when partial
2257 * update the entry in this situation p2m will fail
2258 * settting the shadow entry to point to a scratch page
2260 ops
->set_pfn(&m
, gvt
->gtt
.scratch_mfn
);
2262 ops
->set_pfn(&m
, dma_addr
>> PAGE_SHIFT
);
2264 ggtt_get_host_entry(ggtt_mm
, &m
, g_gtt_index
);
2265 ggtt_invalidate_pte(vgpu
, &m
);
2266 ops
->set_pfn(&m
, gvt
->gtt
.scratch_mfn
);
2267 ops
->clear_present(&m
);
2271 ggtt_set_host_entry(ggtt_mm
, &m
, g_gtt_index
);
2272 ggtt_invalidate(gvt
->dev_priv
);
2273 ggtt_set_guest_entry(ggtt_mm
, &e
, g_gtt_index
);
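/*
 * Worked example for the partial-write handling above (illustrative only):
 * with an 8-byte GGTT entry, a guest that updates the PTE at index N with
 * two 4-byte MMIO writes first hits offset (N << 3) and then (N << 3) + 4.
 * The first write is cached in last_partial_off/last_partial_data; the
 * second write targets the same g_gtt_index but a different offset, so the
 * two halves are merged into e.val64 and the combined entry is shadowed in
 * one go. Any other sequence (e.g. two writes to the same offset, or a
 * write to a different entry) is treated as abnormal: the stale half-entry
 * is pointed at the scratch page and marked not present.
 */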
/**
 * intel_vgpu_emulate_ggtt_mmio_write - emulate GGTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
		unsigned int off, void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
	return ret;
}
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = I915_GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by fully filling the scratch pt with entries which
	 * point to the next level scratch pt or scratch page. The
	 * scratch_pt[type] indicates the scratch pt/scratch page used by the
	 * shadow page table of that type.
	 *
	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
	 * GTT_TYPE_PPGTT_PDE_PT level pt, and it is fully filled with the
	 * mfn of the next lower scratch page table.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters (present/writeable/cache type) are
		 * set the same as in i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}
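/*
 * Informational sketch of the scratch tree built above: for every page-table
 * type above GTT_TYPE_PPGTT_PTE_PT, scratch_pt[type] is filled with entries
 * that point at scratch_pt[type - 1], so
 *
 *   scratch_pt[PML4_PT] -> scratch_pt[PDP_PT] -> scratch_pt[PDE_PT]
 *                                                     -> scratch_pt[PTE_PT]
 *
 * which gives each shadow page-table level a harmless, fully present subtree
 * to reference for guest entries that are not present.
 */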
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
						I915_GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}
static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}
/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;

	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);

	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
	if (IS_ERR(gtt->ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(gtt->ggtt_mm);
	}

	intel_vgpu_reset_ggtt(vgpu, false);

	return create_scratch_page_tree(vgpu);
}
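/*
 * Initialization order note for intel_vgpu_init_gtt() above: the SPT radix
 * tree and the mm/oos/post-shadow lists are set up first, then the GGTT mm
 * is created, the vGPU's GGTT range is reset to point at the scratch page,
 * and finally the per-vGPU scratch page-table tree is allocated.
 */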
static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		intel_vgpu_destroy_mm(mm);
	}

	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
		gvt_err("vgpu ppgtt mm is not fully destroyed\n");

	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
		gvt_err("Why do we still have spt not freed?\n");
		ppgtt_free_all_spt(vgpu);
	}
}
static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
	vgpu->gtt.ggtt_mm = NULL;
}
/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_destroy_ggtt_mm(vgpu);
	release_scratch_page_tree(vgpu);
}
static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
		"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		kfree(oos_page);
	}
}
static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}
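/*
 * Note on the pool above: preallocated_oos_pages (8192 by default) oos pages
 * are allocated up front and parked on oos_page_free_list_head; shadow page
 * tables take them from this free list at runtime to track out-of-sync guest
 * pages, presumably so the shadowing path avoids allocating memory itself.
 */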
/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: guest PPGTT page table root pointers
 *
 * This function is used to find a PPGTT mm object from the mm object pool.
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[])
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos;

	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);

		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
				return mm;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
				    sizeof(mm->ppgtt_mm.guest_pdps)))
				return mm;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}
	return NULL;
}
/**
 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps
 *
 * This function is used to find or create a PPGTT mm object from a guest.
 *
 * Returns:
 * pointer to mm object on success, ERR_PTR() if failed.
 */
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm) {
		intel_vgpu_mm_get(mm);
	} else {
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
		if (IS_ERR(mm))
			gvt_vgpu_err("fail to create mm\n");
	}
	return mm;
}
/**
 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to find a PPGTT mm object from a guest and destroy it.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_vgpu_mm_put(mm);
	return 0;
}
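/*
 * Usage note (informational): intel_vgpu_get_ppgtt_mm() and
 * intel_vgpu_put_ppgtt_mm() form a reference-counted pair keyed by the guest
 * pdps. A lookup that finds an existing mm takes an extra reference, and the
 * matching put drops it, so the mm is destroyed only when the last reference
 * goes away.
 */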
/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
	gvt->gtt.gma_ops = &gen8_gtt_gma_ops;

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}

	gvt->gtt.scratch_page = virt_to_page(page);
	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
	return 0;
}
/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
					I915_GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}
/**
 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
 * @vgpu: a vGPU
 *
 * This function is called to invalidate all PPGTT instances of a vGPU.
 *
 */
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		if (mm->type == INTEL_GVT_MM_PPGTT) {
			list_del_init(&mm->ppgtt_mm.lru_list);
			if (mm->ppgtt_mm.shadowed)
				invalidate_ppgtt_mm(mm);
		}
	}
}
/**
 * intel_vgpu_reset_ggtt - reset the GGTT entries
 * @vgpu: a vGPU
 * @invalidate_old: invalidate old entries
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry old_entry;
	u32 index;
	u32 num_entries;

	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
	pte_ops->set_present(&entry);

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	ggtt_invalidate(dev_priv);
}
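/*
 * Note on the reset above: a vGPU owns two GGTT ranges, the low (aperture,
 * CPU-mappable) range and the high (hidden) range, so both are walked and
 * every host entry is rewritten to point at the scratch page. When
 * invalidate_old is set the previous entries are unmapped first, and a single
 * GGTT invalidation is issued at the end.
 */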
/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and scratch page.
 *
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
}