/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "trace.h"

/* Defined in Intel Open Source PRM.
 * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
 */
#define TRVATTL3PTRDW(i)	_MMIO(0x4de0 + (i)*4)
#define TRNULLDETCT		_MMIO(0x4de8)
#define TRINVTILEDETCT		_MMIO(0x4dec)
#define TRVADR			_MMIO(0x4df0)
#define TRTTE			_MMIO(0x4df4)
#define RING_EXCC(base)		_MMIO((base) + 0x28)
#define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
#define VF_GUARDBAND		_MMIO(0x83a4)

#define GEN9_MOCS_SIZE		64

/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
};

static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
	{RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
	{RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
	{RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
	{RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
	{RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
	{RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
	{RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
	{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
	{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
	{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
	{RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
	{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
	{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
	{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
	{RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
	{RCS, TRVADR, 0, false}, /* 0x4df0 */
	{RCS, TRTTE, 0, false}, /* 0x4df4 */

	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */

	{VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */

	{VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */

	{RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
	{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

	{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
	{RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */

	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
	{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
};

static struct {
	bool initialized;
	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;

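/*
 * Snapshot the host's MOCS control and L3CC programming from hardware so it
 * can be restored when an engine is handed back to host workloads.
 */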
static void load_render_mocs(struct drm_i915_private *dev_priv)
{
	i915_reg_t offset;
	u32 regs[] = {
		[RCS] = 0xc800,
		[VCS] = 0xc900,
		[VCS2] = 0xca00,
		[BCS] = 0xcc00,
		[VECS] = 0xcb00,
	};
	int ring_id, i;

	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
		offset.reg = regs[ring_id];
		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				I915_READ_FW(offset);
			offset.reg += 4;
		}
	}

	offset.reg = 0xb020;
	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
		gen9_render_mocs.l3cc_table[i] =
			I915_READ_FW(offset);
		offset.reg += 4;
	}

	gen9_render_mocs.initialized = true;
}

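/*
 * Emit an MI_LOAD_REGISTER_IMM sequence into the request that reloads the
 * tracked in-context MMIO registers with the vGPU's virtual register values.
 */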
static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
				 struct i915_request *req)
{
	u32 *cs;
	int ret;
	struct engine_mmio *mmio;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = req->engine->id;
	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];

	if (count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id ||
		    !mmio->in_context)
			continue;

		*cs++ = i915_mmio_reg_offset(mmio->reg);
		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
				(mmio->mask << 16);
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, ring_id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
					struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);

	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
				     struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);

	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

/*
 * Use an LRI command to initialize the MMIO registers that live in the
 * context state image of an inhibit context; this covers the tracked
 * engine MMIO, the render MOCS tables and the non-privileged MMIO.
 */
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
				       struct i915_request *req)
{
	int ret;
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = restore_context_mmio_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	/* no MOCS register in context except render engine */
	if (req->engine->id != RCS)
		goto out;

	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
	if (ret)
		goto out;

out:
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return ret;
}

static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum forcewake_domains fw;
	i915_reg_t reg;
	u32 regs[] = {
		[RCS] = 0x4260,
		[VCS] = 0x4264,
		[VCS2] = 0x4268,
		[BCS] = 0x426c,
		[VECS] = 0x4270,
	};

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
		return;

	reg = _MMIO(regs[ring_id]);

	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * we need to hold a forcewake when invalidating RCS TLB caches,
	 * otherwise the device can go to the RC6 state and interrupt the
	 * invalidation process.
	 */
	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
			IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(dev_priv, fw);

	I915_WRITE_FW(reg, 0x1);

	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
	else
		vgpu_vreg_t(vgpu, reg) = 0;

	intel_uncore_forcewake_put(dev_priv, fw);

	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}

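/*
 * Swap the MOCS control and L3CC programming between the outgoing and
 * incoming owner of the engine. A NULL vGPU on either side means the host,
 * whose values come from the gen9_render_mocs snapshot taken above.
 */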
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	i915_reg_t offset, l3_offset;
	u32 old_v, new_v;
	u32 regs[] = {
		[RCS] = 0xc800,
		[VCS] = 0xc900,
		[VCS2] = 0xca00,
		[BCS] = 0xcc00,
		[VECS] = 0xcb00,
	};
	int i;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
		return;

	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(dev_priv);

	offset.reg = regs[ring_id];
	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
			old_v = gen9_render_mocs.control_table[ring_id][i];
		if (next)
			new_v = vgpu_vreg_t(next, offset);
		else
			new_v = gen9_render_mocs.control_table[ring_id][i];

		if (old_v != new_v)
			I915_WRITE_FW(offset, new_v);

		offset.reg += 4;
	}

	if (ring_id == RCS) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
				old_v = gen9_render_mocs.l3cc_table[i];
			if (next)
				new_v = vgpu_vreg_t(next, l3_offset);
			else
				new_v = gen9_render_mocs.l3cc_table[i];

			if (old_v != new_v)
				I915_WRITE_FW(l3_offset, new_v);

			l3_offset.reg += 4;
		}
	}
}

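/*
 * An "inhibit" context has CTX_RESTORE_INHIBIT set in its CONTEXT_CONTROL
 * dword, meaning the hardware will not restore register state from the
 * context image, so GVT must reload that state explicitly via LRI.
 */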
#define CTX_CONTEXT_CONTROL_VAL	0x03

bool is_inhibit_context(struct intel_context *ce)
{
	const u32 *reg_state = ce->lrc_reg_state;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	return inhibit_mask ==
		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
}

/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	struct intel_vgpu_submission *s;
	struct engine_mmio *mmio;
	u32 old_v, new_v;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (IS_SKYLAKE(dev_priv)
		|| IS_KABYLAKE(dev_priv)
		|| IS_BROXTON(dev_priv))
		switch_mocs(pre, next, ring_id);

	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id)
			continue;
		/*
		 * No need to save or restore the mmio which is in the context
		 * state image on Kabylake; it is initialized by the LRI
		 * command and saved or restored together with the context.
		 */
		if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
			&& mmio->in_context)
			continue;

		/* save the outgoing owner's value */
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
			if (mmio->mask)
				vgpu_vreg_t(pre, mmio->reg) &=
						~(mmio->mask << 16);
			old_v = vgpu_vreg_t(pre, mmio->reg);
		} else
			old_v = mmio->value = I915_READ_FW(mmio->reg);

		/* restore the incoming owner's value */
		if (next) {
			s = &next->submission;
			/*
			 * No need to restore the mmio which is in the context
			 * state image if this is not an inhibit context; it
			 * will be restored by the context itself.
			 */
			if (mmio->in_context &&
			    !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
				continue;

			if (mmio->mask)
				new_v = vgpu_vreg_t(next, mmio->reg) |
							(mmio->mask << 16);
			else
				new_v = vgpu_vreg_t(next, mmio->reg);
		} else {
			if (mmio->in_context)
				continue;
			if (mmio->mask)
				new_v = mmio->value | (mmio->mask << 16);
			else
				new_v = mmio->value;
		}

		I915_WRITE_FW(mmio->reg, new_v);

		trace_render_mmio(pre ? pre->id : 0,
				  next ? next->id : 0,
				  "switch",
				  i915_mmio_reg_offset(mmio->reg),
				  old_v, new_v);
	}

	if (next)
		handle_tlb_pending_event(next, ring_id);
}

/**
 * intel_gvt_switch_mmio - switch mmio context of specific engine
 * @pre: the last vGPU that owns the engine
 * @next: the vGPU to switch to
 * @ring_id: specify the engine
 *
 * If pre is null, it indicates that the host owns the engine. If next is
 * null, it indicates that we are switching to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next, int ring_id)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!pre && !next))
		return;

	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
		       pre ? "vGPU" : "host", next ? "vGPU" : "host");

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;

	/*
	 * We are using the raw mmio access wrappers to improve the
	 * performance of batch mmio read/write, so we need to handle
	 * forcewake manually.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	switch_mmio(pre, next, ring_id);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

/**
 * intel_gvt_init_engine_mmio_context - Initialize the engine mmio list
 * @gvt: GVT device
 *
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
	struct engine_mmio *mmio;

	if (IS_SKYLAKE(gvt->dev_priv) ||
		IS_KABYLAKE(gvt->dev_priv) ||
		IS_BROXTON(gvt->dev_priv))
		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
	else
		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;

	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->in_context) {
			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
			intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
		}
	}
}