/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>

#define I915_GTT_PAGE_SHIFT 12
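/* A page shift of 12 gives a GTT page size of 1 << 12 = 4096 bytes (4KB). */
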
struct intel_gvt;
struct intel_vgpu;
struct intel_vgpu_mm;

#define INTEL_GVT_INVALID_ADDR (~0UL)

struct intel_gvt_gtt_entry {
        u64 val64;
        int type;
};

struct intel_gvt_gtt_pte_ops {
        int (*get_entry)(void *pt,
                         struct intel_gvt_gtt_entry *e,
                         unsigned long index,
                         bool hypervisor_access,
                         unsigned long gpa,
                         struct intel_vgpu *vgpu);
        int (*set_entry)(void *pt,
                         struct intel_gvt_gtt_entry *e,
                         unsigned long index,
                         bool hypervisor_access,
                         unsigned long gpa,
                         struct intel_vgpu *vgpu);
        bool (*test_present)(struct intel_gvt_gtt_entry *e);
        void (*clear_present)(struct intel_gvt_gtt_entry *e);
        void (*set_present)(struct intel_gvt_gtt_entry *e);
        bool (*test_pse)(struct intel_gvt_gtt_entry *e);
        void (*clear_pse)(struct intel_gvt_gtt_entry *e);
        bool (*test_ips)(struct intel_gvt_gtt_entry *e);
        void (*clear_ips)(struct intel_gvt_gtt_entry *e);
        bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
        void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
        void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
        void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
        unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};
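
/*
 * Usage sketch (illustrative only): the pte_ops table hides the PTE bit
 * layout, so callers test and edit entries through it instead of poking
 * bits directly. For an entry "e" already read from a page table, with
 * "ops" pointing at the active table, shadowing an entry looks roughly
 * like:
 *
 *      if (ops->test_present(&e)) {
 *              mfn = translate_gfn_to_mfn(ops->get_pfn(&e));
 *              ops->set_pfn(&e, mfn);
 *      }
 *
 * "e", "ops" and "mfn" are placeholders, and translate_gfn_to_mfn() is a
 * hypothetical stand-in for the hypervisor's gfn-to-mfn lookup; none of
 * these names are defined by this header.
 */
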
struct intel_gvt_gtt_gma_ops {
        unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
        unsigned long (*gma_to_pte_index)(unsigned long gma);
        unsigned long (*gma_to_pde_index)(unsigned long gma);
        unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
        unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
        unsigned long (*gma_to_pml4_index)(unsigned long gma);
};
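
/*
 * Usage sketch (illustrative only): a guest graphics memory address (gma)
 * is split into per-level table indexes with these helpers; for a 4-level
 * (48-bit) PPGTT walk, with "gma_ops" pointing at the active table:
 *
 *      pml4_index = gma_ops->gma_to_pml4_index(gma);
 *      pdp_index  = gma_ops->gma_to_l4_pdp_index(gma);
 *      pde_index  = gma_ops->gma_to_pde_index(gma);
 *      pte_index  = gma_ops->gma_to_pte_index(gma);
 *
 * "gma_ops" and the *_index variables are placeholders, not names defined
 * by this header.
 */
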
struct intel_gvt_gtt {
        struct intel_gvt_gtt_pte_ops *pte_ops;
        struct intel_gvt_gtt_gma_ops *gma_ops;
        int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
        void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
        struct list_head oos_page_use_list_head;
        struct list_head oos_page_free_list_head;
        struct mutex ppgtt_mm_lock;
        struct list_head ppgtt_mm_lru_list_head;

        struct page *scratch_page;
        unsigned long scratch_mfn;
};

enum intel_gvt_gtt_type {
        GTT_TYPE_INVALID = 0,

        GTT_TYPE_GGTT_PTE,

        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
        GTT_TYPE_PPGTT_PTE_64K_ENTRY,
        GTT_TYPE_PPGTT_PTE_2M_ENTRY,
        GTT_TYPE_PPGTT_PTE_1G_ENTRY,

        GTT_TYPE_PPGTT_PTE_ENTRY,

        GTT_TYPE_PPGTT_PDE_ENTRY,
        GTT_TYPE_PPGTT_PDP_ENTRY,
        GTT_TYPE_PPGTT_PML4_ENTRY,

        GTT_TYPE_PPGTT_ROOT_ENTRY,

        GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
        GTT_TYPE_PPGTT_ROOT_L4_ENTRY,

        GTT_TYPE_PPGTT_ENTRY,

        GTT_TYPE_PPGTT_PTE_PT,
        GTT_TYPE_PPGTT_PDE_PT,
        GTT_TYPE_PPGTT_PDP_PT,
        GTT_TYPE_PPGTT_PML4_PT,

        GTT_TYPE_MAX,
};

enum intel_gvt_mm_type {
        INTEL_GVT_MM_GGTT,
        INTEL_GVT_MM_PPGTT,
};

#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES

struct intel_gvt_partial_pte {
        unsigned long offset;
        u64 data;
        struct list_head list;
};

struct intel_vgpu_mm {
        enum intel_gvt_mm_type type;
        struct intel_vgpu *vgpu;

        struct kref ref;
        atomic_t pincount;

        union {
                struct {
                        enum intel_gvt_gtt_type root_entry_type;
                        /*
                         * The 4 PDPs in ring context. For 48bit addressing,
                         * only PDP0 is valid and points to PML4. For 32bit
                         * addressing, all 4 are used as true PDPs.
                         */
                        u64 guest_pdps[GVT_RING_CTX_NR_PDPS];
                        u64 shadow_pdps[GVT_RING_CTX_NR_PDPS];
                        bool shadowed;

                        struct list_head list;
                        struct list_head lru_list;
                        struct list_head link; /* possible LRI shadow mm list */
                } ppgtt_mm;
                struct {
                        void *virtual_ggtt;
                        /* Save/restore for PM */
                        u64 *host_ggtt_aperture;
                        u64 *host_ggtt_hidden;
                        struct list_head partial_pte_list;
                } ggtt_mm;
        };
};
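
/*
 * Usage sketch (illustrative only, assuming the union layout above): the
 * type field selects which arm of the union is meaningful, e.g.
 *
 *      if (mm->type == INTEL_GVT_MM_PPGTT &&
 *          mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
 *              root_gpa = mm->ppgtt_mm.guest_pdps[0];
 *
 * i.e. for 48-bit addressing only guest_pdps[0] carries the root (PML4)
 * table address. "mm" and "root_gpa" are placeholders.
 */
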
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
                enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);

static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
{
        kref_get(&mm->ref);
}

void _intel_vgpu_mm_release(struct kref *mm_ref);

static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
{
        kref_put(&mm->ref, _intel_vgpu_mm_release);
}

static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
{
        intel_vgpu_mm_put(mm);
}
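
/*
 * Lifetime sketch (illustrative only): an mm is reference counted through
 * the embedded kref, so a typical user does
 *
 *      intel_vgpu_mm_get(mm);
 *      ... use mm ...
 *      intel_vgpu_mm_put(mm);
 *
 * and the final put releases the mm via _intel_vgpu_mm_release().
 */
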
struct intel_vgpu_guest_page;

struct intel_vgpu_scratch_pt {
        struct page *page;
        unsigned long page_mfn;
};

struct intel_vgpu_gtt {
        struct intel_vgpu_mm *ggtt_mm;
        unsigned long active_ppgtt_mm_bitmap;
        struct list_head ppgtt_mm_list_head;
        struct radix_tree_root spt_tree;
        struct list_head oos_page_list_head;
        struct list_head post_shadow_list_head;
        struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
};

int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);

int intel_gvt_init_gtt(struct intel_gvt *gvt);
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
void intel_gvt_clean_gtt(struct intel_gvt *gvt);

struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
                int page_table_level, void *root_entry);

struct intel_vgpu_oos_page {
        struct intel_vgpu_ppgtt_spt *spt;
        struct list_head list;
        struct list_head vm_list;
        int id;
        void *mem;
};

#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
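/* A 4KB page table of 64-bit (8-byte) entries holds 4096 / 8 = 512 entries. */
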
/* Represents a vGPU shadow page table. */
struct intel_vgpu_ppgtt_spt {
        atomic_t refcount;
        struct intel_vgpu *vgpu;

        struct {
                enum intel_gvt_gtt_type type;
                bool pde_ips; /* for 64KB PTEs */
                void *vaddr;
                struct page *page;
                unsigned long mfn;
        } shadow_page;

        struct {
                enum intel_gvt_gtt_type type;
                bool pde_ips; /* for 64KB PTEs */
                unsigned long gfn;
                unsigned long write_cnt;
                struct intel_vgpu_oos_page *oos_page;
        } guest_page;

        DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
        struct list_head post_shadow_list;
};

int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);

int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);

int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);

void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);

unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
                unsigned long gma);

struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
                u64 pdps[]);

struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
                enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);

int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
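
/*
 * Usage sketch (illustrative only): on workload submission the shadow PPGTT
 * matching the guest PDPs from the ring context is looked up (or created and
 * shadowed) and later released, e.g.
 *
 *      mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *      if (IS_ERR(mm))
 *              return PTR_ERR(mm);
 *      ...
 *      intel_vgpu_put_ppgtt_mm(vgpu, pdps);
 *
 * "pdps" is a placeholder for the guest PDP array read from the ring
 * context; the IS_ERR() return convention is an assumption, not guaranteed
 * by this header alone.
 */
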
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
                unsigned int off, void *p_data, unsigned int bytes);

int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
                unsigned int off, void *p_data, unsigned int bytes);
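
/*
 * Usage sketch (illustrative only): trapped guest accesses to the GGTT
 * entries exposed through the MMIO BAR are forwarded here by the MMIO
 * emulation layer, e.g. for a write:
 *
 *      ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
 *
 * where "off" is the offset into the vGPU's virtual GGTT and p_data/bytes
 * describe the guest write.
 */
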
void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
void intel_gvt_restore_ggtt(struct intel_gvt *gvt);

#endif /* _GVT_GTT_H_ */