/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 */
#include "i915_pvinfo.h"

#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
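/*
 * Note (editorial, not part of the original source): enable_out_of_sync
 * gates the "out of sync" optimization further down, which lets a hot
 * guest PTE page be write-unprotected and re-synced in batches before
 * workload submission; preallocated_oos_pages bounds how many such
 * staging pages are kept. Both are compile-time defaults here.
 */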
/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if (size == 0)
		return vgpu_gmadr_is_valid(vgpu, addr);

	if (vgpu_gmadr_is_aperture(vgpu, addr) &&
	    vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
		return true;
	else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
		 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
		return true;

	gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
		   addr, size);
	return false;
}
/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
		     "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}
/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		     "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}
#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)
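/*
 * Note (editorial): the predicates above rely entirely on the ordering of
 * enum intel_gvt_gtt_type in gtt.h. As an example of how they partition
 * the space, GTT_TYPE_PPGTT_PTE_PT satisfies both gtt_type_is_pt() and
 * gtt_type_is_pte_pt(), while the *_ENTRY types satisfy only
 * gtt_type_is_entry().
 */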
/*
 * Mappings between GTT_TYPE* enumerations.
 * Following information can be found according to the given type:
 * - type of next level page table
 * - type of entry inside this level page table
 * - type of entry with PSE set
 *
 * If the given type doesn't have such a kind of information,
 * e.g. give a l4 root entry type, then request to get its PSE type,
 * give a PTE page table type, then request to get its next level page
 * table type, as we know l4 root entry doesn't have a PSE bit,
 * and a PTE page table doesn't have a next level page table type,
 * GTT_TYPE_INVALID will be returned. This is useful when traversing a
 * page table.
 */
struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	/* We take IPS bit as 'PSE' for PTE level. */
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};
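/*
 * Note (editorial): example of how the table above is consumed by the
 * helpers below -- for GTT_TYPE_PPGTT_PDE_PT, get_entry_type() yields
 * GTT_TYPE_PPGTT_PDE_ENTRY, get_next_pt_type() yields
 * GTT_TYPE_PPGTT_PTE_PT (the next level) and get_pse_type() yields
 * GTT_TYPE_PPGTT_PTE_2M_ENTRY, matching the PDE row of the table.
 */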
static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}
static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;

	return readq(addr);
}

static void ggtt_invalidate(struct intel_gt *gt)
{
	mmio_hw_access_pre(gt);
	intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(gt);
}

static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;

	writeq(pte, addr);
}
static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}
static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}
#define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
#define ADDR_64K_MASK	GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)

#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */

#define GTT_64K_PTE_STRIDE 16
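/*
 * Note (editorial): with these masks, gen8_gtt_get_pfn() below simply
 * selects the address bits that are architecturally valid for the page
 * size of the entry and shifts them down by PAGE_SHIFT; e.g. for a 2M
 * entry the low address bits below bit 21 are ignored, so the returned
 * pfn is 2M-aligned. The 64K-splited flag lives in the ignored PTE bits
 * covered by GTT_SPTE_FLAG_MASK.
 */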
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
	return pfn;
}
static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
		e->val64 &= ~ADDR_64K_MASK;
		pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
	}

	e->val64 |= (pfn << PAGE_SHIFT);
}
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & _PAGE_PSE);
}

static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
{
	if (gen8_gtt_test_pse(e)) {
		switch (e->type) {
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			e->val64 &= ~_PAGE_PSE;
			e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
			break;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
			e->val64 &= ~_PAGE_PSE;
			break;
		default:
			WARN_ON(1);
		}
	}
}

static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return false;

	return !!(e->val64 & GEN8_PDE_IPS_64K);
}

static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return;

	e->val64 &= ~GEN8_PDE_IPS_64K;
}
static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without present bit,
	 * it also works, so we need to treat root pointer entry
	 * specifically.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & _PAGE_PRESENT);
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~_PAGE_PRESENT;
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= _PAGE_PRESENT;
}

static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
}

static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
}

static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
}
/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
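/*
 * Note (editorial): for a 4-level PPGTT the helpers above slice a
 * graphics memory address into table indexes -- gen8_gma_to_pml4_index()
 * takes bits 47:39, gen8_gma_to_l4_pdp_index() bits 38:30,
 * gen8_gma_to_pde_index() bits 29:21 and gen8_gma_to_pte_index() bits
 * 20:12, which is exactly the walk order used by
 * intel_vgpu_gma_to_gpa() further down.
 */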
static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.clear_pse = gen8_gtt_clear_pse,
	.clear_ips = gen8_gtt_clear_ips,
	.test_ips = gen8_gtt_test_ips,
	.clear_64k_splited = gen8_gtt_clear_64k_splited,
	.set_64k_splited = gen8_gtt_set_64k_splited,
	.test_64k_splited = gen8_gtt_test_64k_splited,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
/* Update entry type per pse and ips bit. */
static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
	struct intel_gvt_gtt_entry *entry, bool ips)
{
	switch (entry->type) {
	case GTT_TYPE_PPGTT_PDE_ENTRY:
	case GTT_TYPE_PPGTT_PDP_ENTRY:
		if (pte_ops->test_pse(entry))
			entry->type = get_pse_type(entry->type);
		break;
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		if (ips)
			entry->type = get_pse_type(entry->type);
		break;
	default:
		GEM_BUG_ON(!gtt_type_is_entry(entry->type));
	}

	GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
}
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
	update_entry_type_for_real(pte_ops, entry, false);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}
static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, true);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}
static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}
static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
}
static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
	unsigned long offset = index;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
		offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
		mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
	} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
		offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
		mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
	}

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}
/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	update_entry_type_for_real(ops, e, guest ?
				   spt->guest_page.pde_ips : false);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}
static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)
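/*
 * Note (editorial): the four wrappers above are how the rest of this file
 * touches PPGTT entries. The "guest" variants read and write the guest's
 * own page table through GPA accesses (NULL page_table, guest == true),
 * while the "shadow" variants operate on the host-allocated shadow page
 * via its kernel mapping (spt->shadow_page.vaddr).
 */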
static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}
static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;

	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);

	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
		       PCI_DMA_BIDIRECTIONAL);

	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);

	if (spt->guest_page.gfn) {
		if (spt->guest_page.oos_page)
			detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

		intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
	}

	list_del_init(&spt->post_shadow_list);
	free_spt(spt);
}
*vgpu
)
771 struct intel_vgpu_ppgtt_spt
*spt
, *spn
;
772 struct radix_tree_iter iter
;
777 radix_tree_for_each_slot(slot
, &vgpu
->gtt
.spt_tree
, &iter
, 0) {
778 spt
= radix_tree_deref_slot(slot
);
779 list_move(&spt
->post_shadow_list
, &all_spt
);
783 list_for_each_entry_safe(spt
, spn
, &all_spt
, post_shadow_list
)
static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(
		struct intel_vgpu_page_track *page_track,
		u64 gpa, void *data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
	if (ret)
		return ret;
	return 0;
}
/* Find a spt by guest gfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *track;

	track = intel_vgpu_find_page_track(vgpu, gfn);
	if (track && track->handler == ppgtt_write_protection_handler)
		return track->priv_data;

	return NULL;
}

/* Find the spt by shadow page mfn. */
static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
}
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
	struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	dma_addr_t daddr;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * Init shadow_page.
	 */
	spt->shadow_page.type = type;
	daddr = dma_map_page(kdev, spt->shadow_page.page,
			     0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		ret = -EINVAL;
		goto err_free_spt;
	}
	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;

	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
	if (ret)
		goto err_unmap_dma;

	return spt;

err_unmap_dma:
	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
	free_spt(spt);
	return ERR_PTR(ret);
}
/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
		unsigned long gfn, bool guest_pde_ips)
{
	struct intel_vgpu_ppgtt_spt *spt;
	int ret;

	spt = ppgtt_alloc_spt(vgpu, type);
	if (IS_ERR(spt))
		return spt;

	/*
	 * Init guest_page.
	 */
	ret = intel_vgpu_register_page_track(vgpu, gfn,
			ppgtt_write_protection_handler, spt);
	if (ret) {
		ppgtt_free_spt(spt);
		return ERR_PTR(ret);
	}

	spt->guest_page.type = type;
	spt->guest_page.gfn = gfn;
	spt->guest_page.pde_ips = guest_pde_ips;

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);

	return spt;
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
		if (!ppgtt_get_shadow_entry(spt, e, i))
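/*
 * Note (editorial): when a page table uses 64K pages (pde_ips set), only
 * every 16th slot of the table is meaningful, so the iterators above step
 * by GTT_64K_PTE_STRIDE (16) instead of 1; the same stride shows up again
 * in the 64K split and cleanup paths below.
 */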
static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
	atomic_inc(&spt->refcount);
}

static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
	return atomic_dec_return(&spt->refcount);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	enum intel_gvt_gtt_type cur_pt_type;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type);

		if (!gtt_type_is_pt(cur_pt_type) ||
		    !gtt_type_is_pt(cur_pt_type + 1)) {
			drm_WARN(&i915->drm, 1,
				 "Invalid page table type, cur_pt_type is: %d\n",
				 cur_pt_type);
			return -EINVAL;
		}

		cur_pt_type += 1;

		if (ops->get_pfn(e) ==
		    vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
			     ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_spt(s);
}
static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;
	int type;

	pfn = ops->get_pfn(entry);
	type = spt->shadow_page.type;

	/* Uninitialized spte or unshadowed spte. */
	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
		return;

	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;

	trace_spt_change(spt->vgpu->id, "die", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	if (ppgtt_put_spt(spt) > 0)
		return 0;

	for_each_present_shadow_entry(spt, &e, index) {
		switch (e.type) {
		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
			gvt_vdbg_mm("invalidate 4K entry\n");
			ppgtt_invalidate_pte(spt, &e);
			break;
		case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
			/* We don't setup 64K shadow entry so far. */
			WARN(1, "suspicious 64K gtt entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			gvt_vdbg_mm("invalidate 2M entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			WARN(1, "GVT doesn't support 1GB page\n");
			continue;
		case GTT_TYPE_PPGTT_PML4_ENTRY:
		case GTT_TYPE_PPGTT_PDP_ENTRY:
		case GTT_TYPE_PPGTT_PDE_ENTRY:
			gvt_vdbg_mm("invalidate PMUL4/PDP/PDE entry\n");
			ret = ppgtt_invalidate_spt_by_shadow_entry(
					spt->vgpu, &e);
			if (ret)
				goto fail;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}

	trace_spt_change(spt->vgpu->id, "release", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_spt(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
		     spt, e.val64, e.type);
	return ret;
}
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

	if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
			GAMW_ECO_ENABLE_64K_IPS_FIELD;

		return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		/* 64K paging only controlled by IPS bit in PTE now. */
		return true;
	} else {
		return false;
	}
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	bool ips = false;
	int ret;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

	if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);

	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
	if (spt) {
		ppgtt_get_spt(spt);

		if (ips != spt->guest_page.pde_ips) {
			spt->guest_page.pde_ips = ips;

			gvt_dbg_mm("reshadow PDE since ips changed\n");
			clear_page(spt->shadow_page.vaddr);
			ret = ppgtt_populate_spt(spt);
			if (ret) {
				ppgtt_put_spt(spt);
				goto err;
			}
		}
	} else {
		int type = get_next_pt_type(we->type);

		if (!gtt_type_is_pt(type)) {
			ret = -EINVAL;
			goto err;
		}

		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
		if (IS_ERR(spt)) {
			ret = PTR_ERR(spt);
			goto err;
		}

		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
		if (ret)
			goto err_free_spt;

		ret = ppgtt_populate_spt(spt);
		if (ret)
			goto err_free_spt;

		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
				 spt->shadow_page.type);
	}
	return spt;

err_free_spt:
	ppgtt_free_spt(spt);
	spt = NULL;
err:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ERR_PTR(ret);
}
static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	/* Because we always split 64KB pages, so clear IPS in shadow PDE. */
	if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ops->clear_ips(se);

	ops->set_pfn(se, s->shadow_page.mfn);
}
/*
 * Check if can do 2M page
 * @vgpu: target vgpu
 * @entry: target pfn's gtt entry
 *
 * Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition,
 * negative if found err.
 */
static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
	struct intel_gvt_gtt_entry *entry)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
		return 0;

	pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
	if (pfn == INTEL_GVT_INVALID_ADDR)
		return -EINVAL;

	return PageTransHuge(pfn_to_page(pfn));
}
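/*
 * Note (editorial): 2MB shadowing is only attempted when the platform
 * advertises 2M GTT pages and the guest pfn is actually backed by a host
 * huge page; otherwise ppgtt_populate_shadow_entry() falls back to
 * split_2MB_gtt_entry(), which shadows the 2M guest page with 512
 * individually mapped 4K entries in a sub page table.
 */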
static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *sub_spt;
	struct intel_gvt_gtt_entry sub_se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	unsigned long sub_index;
	int ret;

	gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);

	start_gfn = ops->get_pfn(se);

	sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
	if (IS_ERR(sub_spt))
		return PTR_ERR(sub_spt);

	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
		if (ret) {
			ppgtt_invalidate_spt(spt);
			return ret;
		}
		sub_se.val64 = se->val64;

		/* Copy the PAT field from PDE. */
		sub_se.val64 &= ~_PAGE_PAT;
		sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;

		ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
	}

	/* Clear dirty field. */
	se->val64 &= ~_PAGE_DIRTY;

	ops->clear_pse(se);
	ops->clear_ips(se);
	ops->set_pfn(se, sub_spt->shadow_page.mfn);
	ppgtt_set_shadow_entry(spt, se, index);
	return 0;
}
static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = *se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	int i, ret;

	gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);

	GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);

	start_gfn = ops->get_pfn(se);

	entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
	ops->set_64k_splited(&entry);

	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
					start_gfn + i, PAGE_SIZE, &dma_addr);
		if (ret)
			return ret;

		ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(spt, &entry, index + i);
	}
	return 0;
}
static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se = *ge;
	unsigned long gfn, page_size = PAGE_SIZE;
	dma_addr_t dma_addr;
	int ret;

	if (!pte_ops->test_present(ge))
		return 0;

	gfn = pte_ops->get_pfn(ge);

	switch (ge->type) {
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		gvt_vdbg_mm("shadow 4K gtt entry\n");
		break;
	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
		gvt_vdbg_mm("shadow 64K gtt entry\n");
		/*
		 * The layout of 64K page is special, the page size is
		 * controlled by upper PDE. To be simple, we always split
		 * 64K page to smaller 4K pages in shadow PT.
		 */
		return split_64KB_gtt_entry(vgpu, spt, index, &se);
	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
		gvt_vdbg_mm("shadow 2M gtt entry\n");
		ret = is_2MB_gtt_possible(vgpu, ge);
		if (ret == 0)
			return split_2MB_gtt_entry(vgpu, spt, index, &se);
		else if (ret < 0)
			return ret;
		page_size = I915_GTT_PAGE_SIZE_2M;
		break;
	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
		return -EINVAL;
	default:
		GEM_BUG_ON(1);
	}

	/* direct shadow */
	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
						      &dma_addr);
	if (ret)
		return -ENXIO;

	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
	ppgtt_set_shadow_entry(spt, &se, index);
	return 0;
}
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long gfn, i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	for_each_present_guest_entry(spt, &ge, i) {
		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
			s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
			if (IS_ERR(s)) {
				ret = PTR_ERR(s);
				goto fail;
			}
			ppgtt_get_shadow_entry(spt, &se, i);
			ppgtt_generate_shadow_entry(&se, s, &ge);
			ppgtt_set_shadow_entry(spt, &se, i);
		} else {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
				ppgtt_set_shadow_entry(spt, &se, i);
				continue;
			}

			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, ge.val64, ge.type);
	return ret;
}
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
			       spt->shadow_page.type, se->val64, index);

	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
		    se->type, index, se->val64);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) ==
	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_spt(s);
		if (ret)
			goto fail;
	} else {
		/* We don't setup 64K shadow entry so far. */
		WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
		     "suspicious 64K entry\n");
		ppgtt_invalidate_pte(spt, se);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, se->val64, se->type);
	return ret;
}
static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
			       we->val64, index);

	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
		    we->type, index, we->val64);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ret;
}
static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
	struct intel_gvt_gtt_entry old, new;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			 spt, spt->guest_page.type);

	old.type = new.type = get_entry_type(spt->guest_page.type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
			       spt, spt->guest_page.type,
			       new.val64, index);

		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
	}

	spt->guest_page.write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}
static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			 spt, spt->guest_page.type);

	spt->guest_page.write_cnt = 0;
	spt->guest_page.oos_page = NULL;
	oos_page->spt = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}
static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->spt = spt;
	spt->guest_page.oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
			 spt, spt->guest_page.type);
	return 0;
}
static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
	if (ret)
		return ret;

	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_del_init(&oos_page->vm_list);
	return sync_oos_page(spt->vgpu, oos_page);
}
static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page has already has a oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
				struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
		ret = detach_oos_page(spt->vgpu, oos_page);
		if (ret)
			return ret;
	} else {
		oos_page = container_of(gtt->oos_page_free_list_head.next,
				struct intel_vgpu_oos_page, list);
	}
	return attach_oos_page(oos_page, spt);
}
static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n"))
		return -EINVAL;

	trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
	return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
}
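/*
 * Note (editorial): "out of sync" (OOS) pages trade write-protection
 * faults for batched re-sync. A hot guest PTE page is un-tracked and a
 * snapshot of it is kept in oos_page->mem; before the next workload is
 * submitted, intel_vgpu_sync_oos_pages() diffs the snapshot against the
 * live guest page, re-shadows only the entries that changed, and then
 * re-arms write protection.
 */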
/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-synced shadow for vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to sync all the out-of-synced shadow for vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
	}
	return 0;
}
1606 static int ppgtt_handle_guest_write_page_table(
1607 struct intel_vgpu_ppgtt_spt
*spt
,
1608 struct intel_gvt_gtt_entry
*we
, unsigned long index
)
1610 struct intel_vgpu
*vgpu
= spt
->vgpu
;
1611 int type
= spt
->shadow_page
.type
;
1612 struct intel_gvt_gtt_pte_ops
*ops
= vgpu
->gvt
->gtt
.pte_ops
;
1613 struct intel_gvt_gtt_entry old_se
;
1617 new_present
= ops
->test_present(we
);
1620 * Adding the new entry first and then removing the old one, that can
1621 * guarantee the ppgtt table is validated during the window between
1622 * adding and removal.
1624 ppgtt_get_shadow_entry(spt
, &old_se
, index
);
1627 ret
= ppgtt_handle_guest_entry_add(spt
, we
, index
);
1632 ret
= ppgtt_handle_guest_entry_removal(spt
, &old_se
, index
);
1637 /* For 64KB splited entries, we need clear them all. */
1638 if (ops
->test_64k_splited(&old_se
) &&
1639 !(index
% GTT_64K_PTE_STRIDE
)) {
1640 gvt_vdbg_mm("remove splited 64K shadow entries\n");
1641 for (i
= 0; i
< GTT_64K_PTE_STRIDE
; i
++) {
1642 ops
->clear_64k_splited(&old_se
);
1643 ops
->set_pfn(&old_se
,
1644 vgpu
->gtt
.scratch_pt
[type
].page_mfn
);
1645 ppgtt_set_shadow_entry(spt
, &old_se
, index
+ i
);
1647 } else if (old_se
.type
== GTT_TYPE_PPGTT_PTE_2M_ENTRY
||
1648 old_se
.type
== GTT_TYPE_PPGTT_PTE_1G_ENTRY
) {
1649 ops
->clear_pse(&old_se
);
1650 ops
->set_pfn(&old_se
,
1651 vgpu
->gtt
.scratch_pt
[type
].page_mfn
);
1652 ppgtt_set_shadow_entry(spt
, &old_se
, index
);
1654 ops
->set_pfn(&old_se
,
1655 vgpu
->gtt
.scratch_pt
[type
].page_mfn
);
1656 ppgtt_set_shadow_entry(spt
, &old_se
, index
);
1662 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
1663 spt
, we
->val64
, we
->type
);
static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(spt->guest_page.type)
		&& spt->guest_page.write_cnt >= 2;
}
static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
		      &spt->vgpu->gtt.post_shadow_list_head);
}
/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				   post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				 GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(spt,
							&ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}
static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we, se;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	/*
	 * For page table which has 64K gtt entry, only PTE#0, PTE#16,
	 * PTE#32, ... PTE#496 are used. Unused PTEs update should be
	 * ignored.
	 */
	if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
	    (index % GTT_64K_PTE_STRIDE)) {
		gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
			    index);
		return 0;
	}

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			int type = spt->shadow_page.type;

			ppgtt_get_shadow_entry(spt, &se, index);
			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
			if (ret)
				return ret;
			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &se, index);
		}
		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	spt->guest_page.write_cnt++;

	if (spt->guest_page.oos_page)
		ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
			       false, 0, vgpu);

	if (can_do_out_of_sync(spt)) {
		if (!spt->guest_page.oos_page)
			ppgtt_allocate_oos_page(spt);

		ret = ppgtt_set_guest_page_oos(spt);
		if (ret)
			return ret;
	}
	return 0;
}
*mm
)
1791 struct intel_vgpu
*vgpu
= mm
->vgpu
;
1792 struct intel_gvt
*gvt
= vgpu
->gvt
;
1793 struct intel_gvt_gtt
*gtt
= &gvt
->gtt
;
1794 struct intel_gvt_gtt_pte_ops
*ops
= gtt
->pte_ops
;
1795 struct intel_gvt_gtt_entry se
;
1798 if (!mm
->ppgtt_mm
.shadowed
)
1801 for (index
= 0; index
< ARRAY_SIZE(mm
->ppgtt_mm
.shadow_pdps
); index
++) {
1802 ppgtt_get_shadow_root_entry(mm
, &se
, index
);
1804 if (!ops
->test_present(&se
))
1807 ppgtt_invalidate_spt_by_shadow_entry(vgpu
, &se
);
1809 ppgtt_set_shadow_root_entry(mm
, &se
, index
);
1811 trace_spt_guest_change(vgpu
->id
, "destroy root pointer",
1812 NULL
, se
.type
, se
.val64
, index
);
1815 mm
->ppgtt_mm
.shadowed
= false;
1819 static int shadow_ppgtt_mm(struct intel_vgpu_mm
*mm
)
1821 struct intel_vgpu
*vgpu
= mm
->vgpu
;
1822 struct intel_gvt
*gvt
= vgpu
->gvt
;
1823 struct intel_gvt_gtt
*gtt
= &gvt
->gtt
;
1824 struct intel_gvt_gtt_pte_ops
*ops
= gtt
->pte_ops
;
1825 struct intel_vgpu_ppgtt_spt
*spt
;
1826 struct intel_gvt_gtt_entry ge
, se
;
1829 if (mm
->ppgtt_mm
.shadowed
)
1832 mm
->ppgtt_mm
.shadowed
= true;
1834 for (index
= 0; index
< ARRAY_SIZE(mm
->ppgtt_mm
.guest_pdps
); index
++) {
1835 ppgtt_get_guest_root_entry(mm
, &ge
, index
);
1837 if (!ops
->test_present(&ge
))
1840 trace_spt_guest_change(vgpu
->id
, __func__
, NULL
,
1841 ge
.type
, ge
.val64
, index
);
1843 spt
= ppgtt_populate_spt_by_guest_entry(vgpu
, &ge
);
1845 gvt_vgpu_err("fail to populate guest root pointer\n");
1849 ppgtt_generate_shadow_entry(&se
, spt
, &ge
);
1850 ppgtt_set_shadow_root_entry(mm
, &se
, index
);
1852 trace_spt_guest_change(vgpu
->id
, "populate root pointer",
1853 NULL
, se
.type
, se
.val64
, index
);
1858 invalidate_ppgtt_mm(mm
);
1862 static struct intel_vgpu_mm
*vgpu_alloc_mm(struct intel_vgpu
*vgpu
)
1864 struct intel_vgpu_mm
*mm
;
1866 mm
= kzalloc(sizeof(*mm
), GFP_KERNEL
);
1871 kref_init(&mm
->ref
);
1872 atomic_set(&mm
->pincount
, 0);
1877 static void vgpu_free_mm(struct intel_vgpu_mm
*mm
)
/**
 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps.
 *
 * This function is used to create a ppgtt mm object for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code in pointer if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_PPGTT;

	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
	mm->ppgtt_mm.root_entry_type = root_entry_type;

	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.link);

	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
	else
		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
		       sizeof(mm->ppgtt_mm.guest_pdps));

	ret = shadow_ppgtt_mm(mm);
	if (ret) {
		gvt_vgpu_err("failed to shadow ppgtt mm\n");
		vgpu_free_mm(mm);
		return ERR_PTR(ret);
	}

	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);

	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);

	return mm;
}
static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;
	unsigned long nr_entries;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_GGTT;

	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
	mm->ggtt_mm.virtual_ggtt =
		vzalloc(array_size(nr_entries,
				   vgpu->gvt->device_info.gtt_entry_size));
	if (!mm->ggtt_mm.virtual_ggtt) {
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}

	mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
	if (!mm->ggtt_mm.host_ggtt_aperture) {
		vfree(mm->ggtt_mm.virtual_ggtt);
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}

	mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
	if (!mm->ggtt_mm.host_ggtt_hidden) {
		vfree(mm->ggtt_mm.host_ggtt_aperture);
		vfree(mm->ggtt_mm.virtual_ggtt);
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}

	return mm;
}
/**
 * _intel_vgpu_mm_release - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy a mm object for vGPU
 *
 */
void _intel_vgpu_mm_release(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);

	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
		gvt_err("vgpu mm pin count bug detected\n");

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		list_del(&mm->ppgtt_mm.list);

		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
		list_del(&mm->ppgtt_mm.lru_list);
		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);

		invalidate_ppgtt_mm(mm);
	} else {
		vfree(mm->ggtt_mm.virtual_ggtt);
		vfree(mm->ggtt_mm.host_ggtt_aperture);
		vfree(mm->ggtt_mm.host_ggtt_hidden);
	}

	vgpu_free_mm(mm);
}
/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when user doesn't want to use a vGPU mm object
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	atomic_dec_if_positive(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: target vgpu mm
 *
 * This function is called when user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	atomic_inc(&mm->pincount);

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		ret = shadow_ppgtt_mm(mm);
		if (ret)
			return ret;

		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
		list_move_tail(&mm->ppgtt_mm.lru_list,
			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
	}

	return 0;
}
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	mutex_lock(&gvt->gtt.ppgtt_mm_lock);

	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);

		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->ppgtt_mm.lru_list);
		mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
		invalidate_ppgtt_mm(mm);
		return 1;
	}
	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
	return 0;
}
/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);

	return 0;
}
/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in specific
 * graphics memory space to guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, levels = 0;
	int ret;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
		   mm->type != INTEL_GVT_MM_PPGTT);

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ggtt_get_guest_entry(mm, &e,
			gma_ops->gma_to_ggtt_pte_index(gma));

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
			+ (gma & ~I915_GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
	} else {
		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e, 0);

			gma_index[0] = gma_ops->gma_to_pml4_index(gma);
			gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
			gma_index[2] = gma_ops->gma_to_pde_index(gma);
			gma_index[3] = gma_ops->gma_to_pte_index(gma);
			levels = 4;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e,
					gma_ops->gma_to_l3_pdp_index(gma));

			gma_index[0] = gma_ops->gma_to_pde_index(gma);
			gma_index[1] = gma_ops->gma_to_pte_index(gma);
			levels = 2;
			break;
		default:
			GEM_BUG_ON(1);
		}

		/* walk the shadow page table and get gpa from guest entry */
		for (i = 0; i < levels; i++) {
			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
				(i == levels - 1));
			if (ret)
				goto err;

			if (!pte_ops->test_present(&e)) {
				gvt_dbg_core("GMA 0x%lx is not present\n", gma);
				goto err;
			}
		}

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
			(gma & ~I915_GTT_PAGE_MASK);
		trace_gma_translate(vgpu->id, "ppgtt", 0,
				    mm->ppgtt_mm.root_entry_type, gma, gpa);
	}

	return gpa;
err:
	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}
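/*
 * Note (editorial): for a 4-level PPGTT the walk above visits
 * PML4 -> PDP -> PDE -> PTE using the gma_index[] values computed by the
 * gen8_gma_to_*_index() helpers; only the final level is read from the
 * guest page table (guest == true), while the intermediate levels come
 * from the shadow tables maintained by this file.
 */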
static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	unsigned long gma;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = index << I915_GTT_PAGE_SHIFT;
	if (!intel_gvt_ggtt_validate_range(vgpu,
					   gma, 1 << I915_GTT_PAGE_SHIFT)) {
		gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
		memset(p_data, 0, bytes);
		return 0;
	}

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
			bytes);
	return 0;
}
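/*
 * Each GGTT entry is info->gtt_entry_size bytes wide (8 bytes on the GENs
 * handled here), so a GTT MMIO offset selects entry
 * index = off >> gtt_entry_size_shift, which covers graphics address
 * index << I915_GTT_PAGE_SHIFT.  For example, an 8-byte access at offset
 * 0x10 hits the PTE for GMA 0x2000.
 */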
/**
 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}
static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	pfn = pte_ops->get_pfn(entry);
	if (pfn != vgpu->gvt->gtt.scratch_mfn)
		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
						pfn << PAGE_SHIFT);
}
static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma, gfn;
	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
	dma_addr_t dma_addr;
	int ret;
	struct intel_gvt_partial_pte *partial_pte, *pos, *n;
	bool partial_update = false;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << I915_GTT_PAGE_SHIFT;

	/* the VM may configure the whole GM space when ballooning is used */
	if (!vgpu_gmadr_is_valid(vgpu, gma))
		return 0;

	e.type = GTT_TYPE_GGTT_PTE;
	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);

	/* If the ggtt entry size is 8 bytes and it is split into two 4-byte
	 * writes, save the first 4 bytes in a list and update the virtual
	 * PTE. Only update the shadow PTE when the second 4 bytes arrive.
	 */
	if (bytes < info->gtt_entry_size) {
		bool found = false;

		list_for_each_entry_safe(pos, n,
				&ggtt_mm->ggtt_mm.partial_pte_list, list) {
			if (g_gtt_index == pos->offset >>
					info->gtt_entry_size_shift) {
				if (off != pos->offset) {
					/* the second partial part */
					int last_off = pos->offset &
						(info->gtt_entry_size - 1);

					memcpy((void *)&e.val64 + last_off,
						(void *)&pos->data + last_off,
						bytes);

					list_del(&pos->list);
					kfree(pos);
					found = true;
					break;
				}

				/* update of the first partial part */
				pos->data = e.val64;
				ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
				return 0;
			}
		}

		if (!found) {
			/* the first partial part */
			partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
			if (!partial_pte)
				return -ENOMEM;
			partial_pte->offset = off;
			partial_pte->data = e.val64;
			list_add_tail(&partial_pte->list,
				&ggtt_mm->ggtt_mm.partial_pte_list);
			partial_update = true;
		}
	}

	if (!partial_update && (ops->test_present(&e))) {
		gfn = ops->get_pfn(&e);
		m.val64 = e.val64;
		m.type = e.type;

		/* one PTE update may be issued in multiple writes and the
		 * first write may not construct a valid gfn
		 */
		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
			goto out;
		}

		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
							      PAGE_SIZE, &dma_addr);
		if (ret) {
			gvt_vgpu_err("fail to populate guest ggtt entry\n");
			/* The guest driver may read/write the entry while it
			 * is partially updated; the p2m mapping can fail in
			 * that case, so point the shadow entry at a scratch
			 * page.
			 */
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		} else
			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
	} else {
		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		ops->clear_present(&m);
	}

out:
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);

	ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
	ggtt_invalidate_pte(vgpu, &e);

	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
	ggtt_invalidate(gvt->gt);

	return 0;
}
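/*
 * Example of the partial-update path above: a guest writes one 64-bit PTE
 * as two 32-bit MMIO writes, e.g. 4 bytes at GTT offset 0x100 followed by
 * 4 bytes at 0x104.  The first write is queued on partial_pte_list, the
 * virtual entry is updated and the shadow entry is pointed at the scratch
 * page; the second write merges the saved half, frees the list node and
 * only then DMA-maps the guest page and installs the real shadow entry.
 */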
/**
 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
		unsigned int off, void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	int i;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);

	/* if the ggtt entry of the last submitted context is written,
	 * that context has probably been unpinned.
	 * Set the last shadowed ctx to invalid.
	 */
	for_each_engine(engine, vgpu->gvt->gt, i) {
		if (!s->last_ctx[i].valid)
			continue;

		if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
			s->last_ctx[i].valid = false;
	}
	return ret;
}
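/*
 * A GGTT write at offset off touches entry off >> gtt_entry_size_shift;
 * when that index matches the lrca recorded for an engine's last shadowed
 * context, the cached shadow is dropped by clearing last_ctx[i].valid so
 * it gets rebuilt on the next submission.
 */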
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type type)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = I915_GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr;

	if (drm_WARN_ON(&i915->drm,
			type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by fully filling the scratch pt with entries that
	 * point to the next level scratch pt or scratch page. The
	 * scratch_pt[type] indicates the scratch pt/scratch page used by the
	 * shadow page table.
	 *
	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by the
	 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
	 * is a GTT_TYPE_PPGTT_PTE_PT table, fully filled with the scratch
	 * page mfn.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters like present/writeable/cache type
		 * are set the same as in i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}
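/*
 * The resulting per-vGPU scratch hierarchy chains each level to the
 * scratch table of the level below it, roughly:
 *
 *   scratch_pt[PML4_PT] -> scratch_pt[PDP_PT] -> scratch_pt[PDE_PT]
 *                       -> scratch_pt[PTE_PT] (left zero-filled)
 *
 * so a walk through unmapped guest space always ends in a harmless
 * scratch table instead of a random page.
 */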
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
						I915_GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}
static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}
/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;

	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);

	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
	if (IS_ERR(gtt->ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(gtt->ggtt_mm);
	}

	intel_vgpu_reset_ggtt(vgpu, false);

	INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);

	return create_scratch_page_tree(vgpu);
}
void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		intel_vgpu_destroy_mm(mm);
	}

	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
		gvt_err("vgpu ppgtt mm is not fully destroyed\n");

	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
		gvt_err("Why we still has spt not freed?\n");
		ppgtt_free_all_spt(vgpu);
	}
}
static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_gvt_partial_pte *pos, *next;

	list_for_each_entry_safe(pos, next,
				 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
				 list) {
		gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
			   pos->offset, pos->data);
		kfree(pos);
	}
	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
	vgpu->gtt.ggtt_mm = NULL;
}
/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_destroy_ggtt_mm(vgpu);
	release_scratch_page_tree(vgpu);
}
static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
		"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		free_page((unsigned long)oos_page->mem);
		kfree(oos_page);
	}
}
static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}
		oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
		if (!oos_page->mem) {
			ret = -ENOMEM;
			kfree(oos_page);
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}
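/*
 * With the default preallocated_oos_pages of 8192, this preallocates 8192
 * tracking structures plus one backing page each, i.e. roughly
 * 8192 * 4KB = 32MB of snapshot memory per GVT device, used only when
 * out-of-sync shadow page tracking is enabled.
 */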
/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: pdp root array
 *
 * This function is used to find a PPGTT mm object from the mm object pool.
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[])
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos;

	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);

		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
				return mm;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
				    sizeof(mm->ppgtt_mm.guest_pdps)))
				return mm;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}
	return NULL;
}
/**
 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps
 *
 * This function is used to find or create a PPGTT mm object from a guest.
 *
 * Returns:
 * pointer to mm object on success, ERR_PTR if failed.
 */
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm) {
		intel_vgpu_mm_get(mm);
	} else {
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
		if (IS_ERR(mm))
			gvt_vgpu_err("fail to create mm\n");
	}
	return mm;
}
/**
 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to find a PPGTT mm object from a guest and destroy it.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_vgpu_mm_put(mm);
	return 0;
}
/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
	gvt->gtt.gma_ops = &gen8_gtt_gma_ops;

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}

	gvt->gtt.scratch_page = virt_to_page(page);
	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
	mutex_init(&gvt->gtt.ppgtt_mm_lock);
	return 0;
}
/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
					I915_GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}
/**
 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
 * @vgpu: a vGPU
 *
 * This function is called to invalidate all PPGTT instances of a vGPU.
 */
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		if (mm->type == INTEL_GVT_MM_PPGTT) {
			mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
			list_del_init(&mm->ppgtt_mm.lru_list);
			mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
			if (mm->ppgtt_mm.shadowed)
				invalidate_ppgtt_mm(mm);
		}
	}
}
/**
 * intel_vgpu_reset_ggtt - reset the GGTT entry
 * @vgpu: a vGPU
 * @invalidate_old: invalidate old entries
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry old_entry;
	u32 index;
	u32 num_entries;

	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
	pte_ops->set_present(&entry);

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	ggtt_invalidate(gvt->gt);
}
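/*
 * The number of entries rewritten here is the vGPU's GM size in pages:
 * num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT for the mappable
 * range and vgpu_hidden_sz(vgpu) >> PAGE_SHIFT for the hidden range, e.g.
 * a 128MB aperture corresponds to 32768 GGTT entries pointed back at the
 * scratch page.
 */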
/**
 * intel_vgpu_reset_gtt - reset all the GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and scratch page.
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
}
/**
 * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
 * @gvt: intel gvt device
 *
 * This function is called at driver resume stage to restore
 * GGTT entries of every vGPU.
 */
void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	struct intel_vgpu_mm *mm;
	int id;
	gen8_pte_t pte;
	u32 idx, num_low, num_hi, offset;

	/* Restore dirty host ggtt for all vGPUs */
	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
		mm = vgpu->gtt.ggtt_mm;

		num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
		offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
		for (idx = 0; idx < num_low; idx++) {
			pte = mm->ggtt_mm.host_ggtt_aperture[idx];
			if (pte & _PAGE_PRESENT)
				write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
		}

		num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
		offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
		for (idx = 0; idx < num_hi; idx++) {
			pte = mm->ggtt_mm.host_ggtt_hidden[idx];
			if (pte & _PAGE_PRESENT)
				write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
		}
	}
}