/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

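/*
 * Write the shadow PPGTT root pointers into the PDP register pairs of the
 * shadow ring context (pdp3_UDW .. pdp0_LDW). The pdp[] array is stored in
 * the reverse order of the register pairs, hence the pdp[7 - i] indexing.
 */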
static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
{
        struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
        int i;

        for (i = 0; i < 8; i++)
                pdp_pair[i].val = pdp[7 - i];
}

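/*
 * Populate the shadow context from the guest: copy the guest context pages
 * into the shadow context object, pull selected ring-context registers over
 * with COPY_REG(), and install the shadow PPGTT root pointers.
 */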
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                I915_GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("Invalid guest context descriptor\n");
                        return -EFAULT;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
                dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                I915_GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_timestamp);

        if (ring_id == RCS) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        }
#undef COPY_REG

        set_context_pdp_root_pointer(shadow_ring_context,
                        workload->shadow_mm->shadow_page_table);

        intel_gvt_hypervisor_read_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
        return 0;
}

static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
        return i915_gem_context_force_single_submission(req->ctx);
}

static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
        i915_reg_t reg;

        reg = RING_INSTDONE(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
        reg = RING_ACTHD(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
        reg = RING_ACTHD_UDW(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}

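/*
 * Context status notifier: on a schedule-in of a non-GVT request, switch the
 * engine MMIO state back from the owning vGPU to the host; for GVT requests,
 * switch MMIO ownership to the workload's vGPU on schedule-in and save the
 * ring hardware state on schedule-out or preemption.
 */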
static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
        struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                shadow_ctx_notifier_block[req->engine->id]);
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        enum intel_engine_id ring_id = req->engine->id;
        struct intel_vgpu_workload *workload;
        unsigned long flags;

        if (!is_gvt_request(req)) {
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              NULL, ring_id);
                        scheduler->engine_owner[ring_id] = NULL;
                }
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

                return NOTIFY_OK;
        }

        workload = scheduler->current_workload[ring_id];
        if (unlikely(!workload))
                return NOTIFY_OK;

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              workload->vgpu, ring_id);
                        scheduler->engine_owner[ring_id] = workload->vgpu;
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                                      ring_id, workload->vgpu->id);
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
                save_ring_hw_state(workload->vgpu, ring_id);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
                save_ring_hw_state(workload->vgpu, ring_id);
                break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
        }
        wake_up(&workload->shadow_ctx_status_wq);
        return NOTIFY_OK;
}

static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
                struct intel_engine_cs *engine)
{
        struct intel_context *ce = &ctx->engine[engine->id];
        u64 desc = 0;

        desc = ce->lrc_desc;

        /* Update bits 0-11 of the context descriptor which includes flags
         * like GEN8_CTX_* cached in desc_template
         */
        desc &= U64_MAX << 12;
        desc |= ctx->desc_template & ((1ULL << 12) - 1);

        ce->lrc_desc = desc;
}

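/*
 * Copy the scanned guest ring-buffer contents into ring space allocated on
 * the shadow request, and remember the new ring-buffer VA in the workload.
 */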
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        void *shadow_ring_buffer_va;
        u32 *cs;

        /* allocate shadow ring buffer */
        cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
        if (IS_ERR(cs)) {
                gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
                        workload->rb_len);
                return PTR_ERR(cs);
        }

        shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

        /* get shadow ring buffer va */
        workload->shadow_ring_buffer_va = cs;

        memcpy(cs, shadow_ring_buffer_va,
                        workload->rb_len);

        cs += workload->rb_len / sizeof(u32);
        intel_ring_advance(workload->req, cs);

        return 0;
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        if (!wa_ctx->indirect_ctx.obj)
                return;

        i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
        i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int ring_id = workload->ring_id;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct intel_ring *ring;
        int ret;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (workload->shadowed)
                return 0;

        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                    GEN8_CTX_ADDRESSING_MODE_SHIFT;

        if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
                shadow_context_descriptor_update(shadow_ctx,
                                        dev_priv->engine[ring_id]);

        ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
        if (ret)
                goto err_scan;

        if ((workload->ring_id == RCS) &&
            (workload->wa_ctx.indirect_ctx.size != 0)) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
                        goto err_scan;
        }

        /* Pin the shadow context by GVT even though it will also be pinned
         * when i915 allocates the request. GVT updates the guest context from
         * the shadow context when the workload completes, and by that time
         * i915 may already have unpinned the shadow context, making its pages
         * invalid. So GVT needs its own pin; after the guest context has been
         * updated, the shadow_ctx can be unpinned safely.
         */
        ring = engine->context_pin(engine, shadow_ctx);
        if (IS_ERR(ring)) {
                ret = PTR_ERR(ring);
                gvt_vgpu_err("fail to pin shadow context\n");
                goto err_shadow;
        }

        ret = populate_shadow_context(workload);
        if (ret)
                goto err_unpin;
        workload->shadowed = true;
        return 0;

err_unpin:
        engine->context_unpin(engine, shadow_ctx);
err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
        return ret;
}

static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct drm_i915_gem_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        int ret;

        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
                goto err_unpin;
        }

        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

        workload->req = i915_gem_request_get(rq);
        ret = copy_workload_to_ring_buffer(workload);
        if (ret)
                goto err_unpin;
        return 0;

err_unpin:
        engine->context_unpin(engine, shadow_ctx);
        release_shadow_wa_ctx(&workload->wa_ctx);
        return ret;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);

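/*
 * Pin every shadow batch buffer of the workload into the GGTT, relocate the
 * batch-buffer-start command to point at the shadow copy, flush any pending
 * CPU writes, and tie each VMA to the workload's request.
 */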
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_gvt *gvt = workload->vgpu->gvt;
        const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
        struct intel_vgpu_shadow_bb *bb;
        int ret;

        list_for_each_entry(bb, &workload->shadow_bb, list) {
                bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
                if (IS_ERR(bb->vma)) {
                        ret = PTR_ERR(bb->vma);
                        goto err;
                }

                /* relocate shadow batch buffer */
                bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
                if (gmadr_bytes == 8)
                        bb->bb_start_cmd_va[2] = 0;

                /* No one is going to touch shadow bb from now on. */
                if (bb->clflush & CLFLUSH_AFTER) {
                        drm_clflush_virt_range(bb->va, bb->obj->base.size);
                        bb->clflush &= ~CLFLUSH_AFTER;
                }

                ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
                if (ret)
                        goto err;

                i915_gem_obj_finish_shmem_access(bb->obj);
                bb->accessing = false;

                i915_vma_move_to_active(bb->vma, workload->req, 0);
        }
        return 0;
err:
        release_shadow_batch_buffer(workload);
        return ret;
}

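/*
 * Point the per-context and RCS indirect-context pointers in the shadow ring
 * context at the shadowed workaround context in the GGTT.
 */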
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        struct intel_vgpu_workload *workload = container_of(wa_ctx,
                        struct intel_vgpu_workload,
                        wa_ctx);
        int ring_id = workload->ring_id;
        struct intel_vgpu_submission *s = &workload->vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap_atomic(page);

        shadow_ring_context->bb_per_ctx_ptr.val =
                (shadow_ring_context->bb_per_ctx_ptr.val &
                (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
        shadow_ring_context->rcs_indirect_ctx.val =
                (shadow_ring_context->rcs_indirect_ctx.val &
                (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;

        kunmap_atomic(shadow_ring_context);
        return 0;
}

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        struct i915_vma *vma;
        unsigned char *per_ctx_va =
                (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
                wa_ctx->indirect_ctx.size;

        if (wa_ctx->indirect_ctx.size == 0)
                return 0;

        vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
                                       0, CACHELINE_BYTES, 0);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        /* FIXME: we are not tracking our pinned VMA leaving it
         * up to the core to fix up the stray pin_count upon free.
         */

        wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

        wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
        memset(per_ctx_va, 0, CACHELINE_BYTES);

        update_wa_ctx_2_shadow_ctx(wa_ctx);
        return 0;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_vgpu_shadow_bb *bb, *pos;

        if (list_empty(&workload->shadow_bb))
                return;

        bb = list_first_entry(&workload->shadow_bb,
                        struct intel_vgpu_shadow_bb, list);

        mutex_lock(&dev_priv->drm.struct_mutex);

        list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
                if (bb->obj) {
                        if (bb->accessing)
                                i915_gem_obj_finish_shmem_access(bb->obj);

                        if (bb->va && !IS_ERR(bb->va))
                                i915_gem_object_unpin_map(bb->obj);

                        if (bb->vma && !IS_ERR(bb->vma)) {
                                i915_vma_unpin(bb->vma);
                                i915_vma_close(bb->vma);
                        }
                        __i915_gem_object_release_unless_active(bb->obj);
                }
                list_del(&bb->list);
        }

        mutex_unlock(&dev_priv->drm.struct_mutex);
}

static int prepare_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        int ret = 0;

        ret = intel_vgpu_pin_mm(workload->shadow_mm);
        if (ret) {
                gvt_vgpu_err("fail to vgpu pin mm\n");
                return ret;
        }

        ret = intel_vgpu_sync_oos_pages(workload->vgpu);
        if (ret) {
                gvt_vgpu_err("fail to vgpu sync oos pages\n");
                goto err_unpin_mm;
        }

        ret = intel_vgpu_flush_post_shadow(workload->vgpu);
        if (ret) {
                gvt_vgpu_err("fail to flush post shadow\n");
                goto err_unpin_mm;
        }

        ret = intel_gvt_generate_request(workload);
        if (ret) {
                gvt_vgpu_err("fail to generate request\n");
                goto err_unpin_mm;
        }

        ret = prepare_shadow_batch_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
                goto err_unpin_mm;
        }

        ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
                goto err_shadow_batch;
        }

        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret)
                        goto err_shadow_wa_ctx;
        }

        return 0;
err_shadow_wa_ctx:
        release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
        release_shadow_batch_buffer(workload);
err_unpin_mm:
        intel_vgpu_unpin_mm(workload->shadow_mm);
        return ret;
}

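/*
 * Shadow and prepare the workload under struct_mutex, then submit its request
 * to i915. Any error is recorded in workload->status; if a request was
 * already allocated it is still added so that it can be retired normally.
 */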
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int ring_id = workload->ring_id;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        int ret = 0;

        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);

        mutex_lock(&dev_priv->drm.struct_mutex);

        ret = intel_gvt_scan_and_shadow_workload(workload);
        if (ret)
                goto out;

        ret = prepare_workload(workload);
        if (ret) {
                engine->context_unpin(engine, shadow_ctx);
                goto out;
        }

out:
        if (ret)
                workload->status = ret;

        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                        ring_id, workload->req);
                i915_add_request(workload->req);
                workload->dispatched = true;
        }

        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}

static struct intel_vgpu_workload *pick_next_workload(
                struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;

        mutex_lock(&gvt->lock);

        /*
         * no current vgpu / will be scheduled out / no workload
         * bail out
         */
        if (!scheduler->current_vgpu) {
                gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
                goto out;
        }

        if (scheduler->need_reschedule) {
                gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
                goto out;
        }

        if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                goto out;

        /*
         * still have a current workload; maybe the workload dispatcher
         * failed to submit it for some reason, so resubmit it.
         */
        if (scheduler->current_workload[ring_id]) {
                workload = scheduler->current_workload[ring_id];
                gvt_dbg_sched("ring id %d still have current workload %p\n",
                                ring_id, workload);
                goto out;
        }

        /*
         * pick a workload as the current workload.
         * Once the current workload is set, the schedule policy routines
         * will wait until it is finished before scheduling out a vgpu.
         */
        scheduler->current_workload[ring_id] = container_of(
                        workload_q_head(scheduler->current_vgpu, ring_id)->next,
                        struct intel_vgpu_workload, list);

        workload = scheduler->current_workload[ring_id];

        gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

        atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
        mutex_unlock(&gvt->lock);
        return workload;
}

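/*
 * Write the shadow context back to the guest: copy the shadow context pages
 * into guest memory, update the guest ring header, and copy back selected
 * ring-context registers.
 */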
static void update_guest_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        int ring_id = workload->ring_id;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                I915_GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("invalid guest context descriptor\n");
                        return;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
                src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                I915_GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
                RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_timestamp);
#undef COPY_REG

        intel_gvt_hypervisor_write_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
}

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
        struct intel_vgpu_workload *pos, *n;
        unsigned int tmp;

        /* free the unsubmitted workloads in the queues. */
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
                list_for_each_entry_safe(pos, n,
                        &s->workload_q_head[engine->id], list) {
                        list_del_init(&pos->list);
                        intel_vgpu_destroy_workload(pos);
                }
                clear_bit(engine->id, s->shadow_ctx_desc_updated);
        }
}

static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload =
                scheduler->current_workload[ring_id];
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        int event;

        mutex_lock(&gvt->lock);

        /* For a workload with a request, wait for the context switch to make
         * sure the request is completed.
         * For a workload without a request, complete it directly.
         */
        if (workload->req) {
                struct drm_i915_private *dev_priv =
                        workload->vgpu->gvt->dev_priv;
                struct intel_engine_cs *engine =
                        dev_priv->engine[workload->ring_id];
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));

                /* If this request caused a GPU hang, req->fence.error will be
                 * set to -EIO. Use -EIO as the workload status so that no
                 * context switch interrupt is triggered to the guest for a
                 * hung request.
                 */
                if (likely(workload->status == -EINPROGRESS)) {
                        if (workload->req->fence.error == -EIO)
                                workload->status = -EIO;
                        else
                                workload->status = 0;
                }

                i915_gem_request_put(fetch_and_zero(&workload->req));

                if (!workload->status && !(vgpu->resetting_eng &
                                           ENGINE_MASK(ring_id))) {
                        update_guest_context(workload);

                        for_each_set_bit(event, workload->pending_events,
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }
                mutex_lock(&dev_priv->drm.struct_mutex);
                /* unpin shadow ctx as the shadow_ctx update is done */
                engine->context_unpin(engine, s->shadow_ctx);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }

        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
                        ring_id, workload, workload->status);

        scheduler->current_workload[ring_id] = NULL;

        list_del_init(&workload->list);

        if (!workload->status) {
                release_shadow_batch_buffer(workload);
                release_shadow_wa_ctx(&workload->wa_ctx);
        }

        if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
                /* If workload->status is not successful, the hardware GPU
                 * hung or something went wrong in i915/GVT, and GVT will not
                 * inject a context switch interrupt to the guest. From the
                 * guest's point of view this is a vGPU hang, so emulate one:
                 * if there are pending workloads already submitted by the
                 * guest, clean them up the way the hardware GPU would.
                 *
                 * If we are in the middle of an engine reset, the pending
                 * workloads will not be submitted to the hardware GPU and
                 * will be cleaned up later during the reset, so doing the
                 * cleanup here has no impact.
                 */
                clean_workloads(vgpu, ENGINE_MASK(ring_id));
        }

        workload->complete(workload);

        atomic_dec(&s->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);

        if (gvt->scheduler.need_reschedule)
                intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

        mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
        struct intel_gvt *gvt;
        int ring_id;
};

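/*
 * Per-ring scheduler thread: wait for a workload to be picked for this ring,
 * dispatch it to i915 while holding a runtime PM reference (and, on SKL/KBL,
 * forcewake), wait for the request to finish, then complete the workload.
 */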
static int workload_thread(void *priv)
{
        struct workload_thread_param *p = (struct workload_thread_param *)priv;
        struct intel_gvt *gvt = p->gvt;
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
                        || IS_KABYLAKE(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        kfree(p);

        gvt_dbg_core("workload thread for ring %d started\n", ring_id);

        while (!kthread_should_stop()) {
                add_wait_queue(&scheduler->waitq[ring_id], &wait);
                do {
                        workload = pick_next_workload(gvt, ring_id);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
                remove_wait_queue(&scheduler->waitq[ring_id], &wait);

                if (!workload)
                        break;

                gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
                                workload->ring_id, workload,
                                workload->vgpu->id);

                intel_runtime_pm_get(gvt->dev_priv);

                gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                                workload->ring_id, workload);

                if (need_force_wake)
                        intel_uncore_forcewake_get(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                mutex_lock(&gvt->lock);
                ret = dispatch_workload(workload);
                mutex_unlock(&gvt->lock);

                if (ret) {
                        vgpu = workload->vgpu;
                        gvt_vgpu_err("fail to dispatch workload, skip\n");
                        goto complete;
                }

                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);
                i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                                workload, workload->status);

                complete_current_workload(gvt, ring_id);

                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                intel_runtime_pm_put(gvt->dev_priv);
                if (ret && (vgpu_is_vm_unhealthy(ret)))
                        enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
        }
        return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

        if (atomic_read(&s->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");

                wait_event(scheduler->workload_complete_wq,
                           !atomic_read(&s->running_workload_num));
        }
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;

        gvt_dbg_core("clean workload scheduler\n");

        for_each_engine(engine, gvt->dev_priv, i) {
                atomic_notifier_chain_unregister(
                                &engine->context_status_notifier,
                                &gvt->shadow_ctx_notifier_block[i]);
                kthread_stop(scheduler->thread[i]);
        }
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;
        int ret;

        gvt_dbg_core("init workload scheduler\n");

        init_waitqueue_head(&scheduler->workload_complete_wq);

        for_each_engine(engine, gvt->dev_priv, i) {
                init_waitqueue_head(&scheduler->waitq[i]);

                param = kzalloc(sizeof(*param), GFP_KERNEL);
                if (!param) {
                        ret = -ENOMEM;
                        goto err;
                }

                param->gvt = gvt;
                param->ring_id = i;

                scheduler->thread[i] = kthread_run(workload_thread, param,
                        "gvt workload %d", i);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("fail to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }

                gvt->shadow_ctx_notifier_block[i].notifier_call =
                                        shadow_context_status_change;
                atomic_notifier_chain_register(&engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
        }
        return 0;
err:
        intel_gvt_clean_workload_scheduler(gvt);
        kfree(param);
        param = NULL;
        return ret;
}

/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;

        intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
        i915_gem_context_put(s->shadow_ctx);
        kmem_cache_destroy(s->workloads);
}

/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
                unsigned long engine_mask)
{
        struct intel_vgpu_submission *s = &vgpu->submission;

        if (!s->active)
                return;

        clean_workloads(vgpu, engine_mask);
        s->ops->reset(vgpu, engine_mask);
}

/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        enum intel_engine_id i;
        struct intel_engine_cs *engine;
        int ret;

        s->shadow_ctx = i915_gem_context_create_gvt(
                        &vgpu->gvt->dev_priv->drm);
        if (IS_ERR(s->shadow_ctx))
                return PTR_ERR(s->shadow_ctx);

        if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
                s->shadow_ctx->priority = INT_MAX;

        bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

        s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
                        sizeof(struct intel_vgpu_workload), 0,
                        SLAB_HWCACHE_ALIGN,
                        NULL);

        if (!s->workloads) {
                ret = -ENOMEM;
                goto out_shadow_ctx;
        }

        for_each_engine(engine, vgpu->gvt->dev_priv, i)
                INIT_LIST_HEAD(&s->workload_q_head[i]);

        atomic_set(&s->running_workload_num, 0);
        bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

        return 0;

out_shadow_ctx:
        i915_gem_context_put(s->shadow_ctx);
        return ret;
}

/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: the engines covered by the submission interface change
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when guest configures submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
                                     unsigned long engine_mask,
                                     unsigned int interface)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        const struct intel_vgpu_submission_ops *ops[] = {
                [INTEL_VGPU_EXECLIST_SUBMISSION] =
                        &intel_vgpu_execlist_submission_ops,
        };
        int ret;

        if (WARN_ON(interface >= ARRAY_SIZE(ops)))
                return -EINVAL;

        if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
                return -EINVAL;

        if (s->active)
                s->ops->clean(vgpu, engine_mask);

        if (interface == 0) {
                s->ops = NULL;
                s->virtual_submission_interface = 0;
                s->active = false;
                gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
                return 0;
        }

        ret = ops[interface]->init(vgpu, engine_mask);
        if (ret)
                return ret;

        s->ops = ops[interface];
        s->virtual_submission_interface = interface;
        s->active = true;

        gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
                        vgpu->id, s->ops->name);

        return 0;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu_submission *s = &workload->vgpu->submission;

        if (workload->shadow_mm)
                intel_gvt_mm_unreference(workload->shadow_mm);

        kmem_cache_free(s->workloads, workload);
}

static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_vgpu_workload *workload;

        workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
        if (!workload)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&workload->list);
        INIT_LIST_HEAD(&workload->shadow_bb);

        init_waitqueue_head(&workload->shadow_ctx_status_wq);
        atomic_set(&workload->shadow_ctx_active, 0);

        workload->status = -EINPROGRESS;
        workload->shadowed = false;
        workload->vgpu = vgpu;

        return workload;
}

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

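/*
 * Read the eight PDP root-pointer values out of the guest ring context
 * (starting at pdp3_UDW) into pdp[], stored in the reverse order of their
 * layout in the context.
 */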
static void read_guest_pdps(struct intel_vgpu *vgpu,
                u64 ring_context_gpa, u32 pdp[8])
{
        u64 gpa;
        int i;

        gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);

        for (i = 0; i < 8; i++)
                intel_gvt_hypervisor_read_gpa(vgpu,
                                gpa + i * 8, &pdp[7 - i], 4);
}

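/*
 * Derive the guest page-table level from the context's addressing mode, read
 * the guest PDPs, and find (or create) the matching shadow PPGTT mm for the
 * workload.
 */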
static int prepare_mm(struct intel_vgpu_workload *workload)
{
        struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
        struct intel_vgpu_mm *mm;
        struct intel_vgpu *vgpu = workload->vgpu;
        int page_table_level;
        u32 pdp[8];

        if (desc->addressing_mode == 1) { /* legacy 32-bit */
                page_table_level = 3;
        } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
                page_table_level = 4;
        } else {
                gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
                return -EINVAL;
        }

        read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);

        mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
        if (mm) {
                intel_gvt_mm_reference(mm);
        } else {
                mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
                                pdp, page_table_level, 0);
                if (IS_ERR(mm)) {
                        gvt_vgpu_err("fail to create mm object.\n");
                        return PTR_ERR(mm);
                }
        }
        workload->shadow_mm = mm;
        return 0;
}

#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
                ((a)->lrca == (b)->lrca))

#define get_last_workload(q) \
        (list_empty(q) ? NULL : container_of(q->prev, \
        struct intel_vgpu_workload, list))

/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @ring_id: ring ID of the workload
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                           struct execlist_ctx_descriptor_format *desc)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct list_head *q = workload_q_head(vgpu, ring_id);
        struct intel_vgpu_workload *last_workload = get_last_workload(q);
        struct intel_vgpu_workload *workload = NULL;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        u64 ring_context_gpa;
        u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
        int ret;

        ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                        (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
        if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
                gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
                return ERR_PTR(-EINVAL);
        }

        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ring_header.val), &head, 4);

        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ring_tail.val), &tail, 4);

        head &= RB_HEAD_OFF_MASK;
        tail &= RB_TAIL_OFF_MASK;

        if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
                gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
                gvt_dbg_el("ctx head %x real head %lx\n", head,
                                last_workload->rb_tail);
                /*
                 * cannot use guest context head pointer here,
                 * as it might not be updated at this time
                 */
                head = last_workload->rb_tail;
        }

        gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

        /* record some ring buffer register values for scan and shadow */
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rb_start.val), &start, 4);
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

        workload = alloc_workload(vgpu);
        if (IS_ERR(workload))
                return workload;

        workload->ring_id = ring_id;
        workload->ctx_desc = *desc;
        workload->ring_context_gpa = ring_context_gpa;
        workload->rb_head = head;
        workload->rb_tail = tail;
        workload->rb_start = start;
        workload->rb_ctl = ctl;

        if (ring_id == RCS) {
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

                workload->wa_ctx.indirect_ctx.guest_gma =
                        indirect_ctx & INDIRECT_CTX_ADDR_MASK;
                workload->wa_ctx.indirect_ctx.size =
                        (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
                        CACHELINE_BYTES;
                workload->wa_ctx.per_ctx.guest_gma =
                        per_ctx & PER_CTX_ADDR_MASK;
                workload->wa_ctx.per_ctx.valid = per_ctx & 1;
        }

        gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
                        workload, ring_id, head, tail, start, ctl);

        ret = prepare_mm(workload);
        if (ret) {
                kmem_cache_free(s->workloads, workload);
                return ERR_PTR(ret);
        }

        /* Only scan and shadow the first workload in the queue
         * as there is only one pre-allocated buf-obj for shadow.
         */
        if (list_empty(workload_q_head(vgpu, ring_id))) {
                intel_runtime_pm_get(dev_priv);
                mutex_lock(&dev_priv->drm.struct_mutex);
                ret = intel_gvt_scan_and_shadow_workload(workload);
                mutex_unlock(&dev_priv->drm.struct_mutex);
                intel_runtime_pm_put(dev_priv);
        }

        if (ret && (vgpu_is_vm_unhealthy(ret))) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                intel_vgpu_destroy_workload(workload);
                return ERR_PTR(ret);
        }

        return workload;
}

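/*
 * A minimal usage sketch of the create/queue pair (the real caller is
 * assumed to be the per-ring submission code, e.g. the execlist handling,
 * which is not shown here):
 *
 *	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
 *	if (IS_ERR(workload))
 *		return PTR_ERR(workload);
 *	// fill in per-interface fields such as workload->prepare/complete
 *	intel_vgpu_queue_workload(workload);
 */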
/**
 * intel_vgpu_queue_workload - Queue a vGPU workload
 * @workload: the workload to queue in
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
        list_add_tail(&workload->list,
                workload_q_head(workload->vgpu, workload->ring_id));
        intel_gvt_kick_schedule(workload->vgpu->gvt);
        wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}