/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */
#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"
#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	int i;

	for (i = 0; i < 8; i++)
		ring_context->pdps[i].val = pdp[7 - i];
}
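
/* Copy the shadow PPGTT root pointers into the ring context of the shadow
 * context object, so the workload executes against the shadowed page tables.
 */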
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
	struct drm_i915_gem_object *ctx_obj =
		workload->req->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

	if (WARN_ON(!workload->shadow_mm))
		return;

	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
		return;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);
	set_context_pdp_root_pointer(shadow_ring_context,
			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
	kunmap(page);
}
/*
 * When populating the shadow context from the guest, we should not override
 * OA-related registers, so that they will not be overwritten by guest OA
 * configs. This makes it possible to capture OA data from the host for both
 * the host and the guests.
 */
static void sr_oa_regs(struct intel_vgpu_workload *workload,
		u32 *reg_state, bool save)
{
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
	int i = 0;
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};

	if (workload->ring_id != RCS)
		return;

	if (save) {
		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;

			workload->flex_mmio[i] = reg_state[state_offset + 1];
		}
	} else {
		reg_state[ctx_oactxctrl] =
			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;
			u32 mmio = flex_mmio[i];

			reg_state[state_offset] = mmio;
			reg_state[state_offset + 1] = workload->flex_mmio[i];
		}
	}
}
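
/* Copy the guest execlist context (engine context pages and the ring
 * context page) into the shadow context object, saving and restoring the
 * OA registers around the copy via sr_oa_regs() so the host perf settings
 * are preserved.
 */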
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct drm_i915_gem_object *ctx_obj =
		workload->req->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EFAULT;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
		intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
					      + RING_CTX_OFF(name.val),\
					      &shadow_ring_context->name.val, 4);\
		shadow_ring_context->name.val |= 0xffff << 16;\
	}

	COPY_REG_MASKED(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG
#undef COPY_REG_MASKED

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
	kunmap(page);
	return 0;
}
static inline bool is_gvt_request(struct i915_request *req)
{
	return i915_gem_context_force_single_submission(req->gem_context);
}
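
/* Snapshot INSTDONE/ACTHD from the engine's HW registers into the vGPU's
 * virtual registers; used when the shadow context is scheduled out or
 * preempted.
 */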
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
	i915_reg_t reg;

	reg = RING_INSTDONE(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD_UDW(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}
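
/* Context status notifier: switches engine MMIO state between host and
 * vGPU owners on schedule-in, and saves ring HW state / wakes the status
 * waitqueue on schedule-out and preemption.
 */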
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct i915_request *req = data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;
	unsigned long flags;

	if (!is_gvt_request(req)) {
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		save_ring_hw_state(workload->vgpu, ring_id);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		save_ring_hw_state(workload->vgpu, ring_id);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}
static void shadow_context_descriptor_update(struct intel_context *ce)
{
	u64 desc = 0;

	desc = ce->lrc_desc;

	/* Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= U64_MAX << 12;
	desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);

	ce->lrc_desc = desc;
}
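
/* Copy the scanned (shadow) ring buffer contents into space reserved in the
 * shadow context's real ring via intel_ring_begin().
 */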
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct i915_request *req = workload->req;
	void *shadow_ring_buffer_va;
	u32 *cs;

	if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
		&& is_inhibit_context(req->hw_context))
		intel_vgpu_restore_inhibit_context(vgpu, req);

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
			workload->rb_len);
		return PTR_ERR(cs);
	}

	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va,
			workload->rb_len);

	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);

	return 0;
}
static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}
/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
	struct intel_context *ce;
	struct i915_request *rq;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->req)
		return 0;

	/* Pin the shadow context by GVT even though the shadow context will
	 * be pinned when i915 allocates the request. That is because GVT will
	 * update the guest context from the shadow context when the workload
	 * is completed, and at that moment i915 may already have unpinned the
	 * shadow context, making the shadow_ctx pages invalid. So GVT needs
	 * to pin it itself. After updating the guest context, GVT can unpin
	 * the shadow_ctx safely.
	 */
	ce = intel_context_pin(shadow_ctx, engine);
	if (IS_ERR(ce)) {
		gvt_vgpu_err("fail to pin shadow context\n");
		return PTR_ERR(ce);
	}

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(ce);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		goto err_unpin;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_shadow;
	}

	rq = i915_request_alloc(engine, shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto err_shadow;
	}
	workload->req = i915_request_get(rq);

	ret = populate_shadow_context(workload);
	if (ret)
		goto err_req;

	return 0;
err_req:
	rq = fetch_and_zero(&workload->req);
	i915_request_put(rq);
err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_unpin:
	intel_context_unpin(ce);
	return ret;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
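
/* Pin the shadow batch buffers into GGTT and patch the MI_BATCH_BUFFER_START
 * commands in the shadow ring buffer to point at them; pure PPGTT batches
 * keep their original guest addresses.
 */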
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_gvt *gvt = workload->vgpu->gvt;
	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_vgpu_shadow_bb *bb;
	int ret;

	list_for_each_entry(bb, &workload->shadow_bb, list) {
		/* For a privileged batch buffer that is not wa_ctx, the
		 * bb_start_cmd_va is only updated into ring_scan_buffer, not
		 * the real ring address allocated in the later
		 * copy_workload_to_ring_buffer. Please note that
		 * shadow_ring_buffer_va points to the real ring buffer va
		 * in copy_workload_to_ring_buffer.
		 */

		if (bb->bb_offset)
			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
				+ bb->bb_offset;

		if (bb->ppgtt) {
			/* For a non-privileged bb, scan & shadow is only for
			 * debugging purposes, so the content of the shadow bb
			 * is the same as the original bb. Therefore, here,
			 * rather than switching to the shadow bb's gma
			 * address, we directly use the original batch buffer's
			 * gma address, and send the original bb to hardware
			 * directly.
			 */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}
			i915_gem_obj_finish_shmem_access(bb->obj);
			bb->accessing = false;

		} else {
			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
					NULL, 0, 0, 0);
			if (IS_ERR(bb->vma)) {
				ret = PTR_ERR(bb->vma);
				goto err;
			}

			/* relocate shadow batch buffer */
			bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
			if (gmadr_bytes == 8)
				bb->bb_start_cmd_va[2] = 0;

			/* No one is going to touch shadow bb from now on. */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}

			ret = i915_gem_object_set_to_gtt_domain(bb->obj,
					false);
			if (ret)
				goto err;

			i915_gem_obj_finish_shmem_access(bb->obj);
			bb->accessing = false;

			ret = i915_vma_move_to_active(bb->vma,
						      workload->req, 0);
			if (ret)
				goto err;
		}
	}
	return 0;
err:
	release_shadow_batch_buffer(workload);
	return ret;
}
static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload =
		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
	struct i915_request *rq = workload->req;
	struct execlist_ring_context *shadow_ring_context =
		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
}
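
/* Pin the shadow indirect context object into GGTT and point the shadow
 * ring context's per-ctx and indirect-ctx fields at it.
 */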
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct i915_vma *vma;
	unsigned char *per_ctx_va =
		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
				       0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* FIXME: we are not tracking our pinned VMA, leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */

	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
	memset(per_ctx_va, 0, CACHELINE_BYTES);

	update_wa_ctx_2_shadow_ctx(wa_ctx);
	return 0;
}
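
/* Drop all per-workload shadow batch buffer resources: pinned maps, GGTT
 * vmas and the backing objects.
 */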
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_shadow_bb *bb, *pos;

	if (list_empty(&workload->shadow_bb))
		return;

	bb = list_first_entry(&workload->shadow_bb,
			struct intel_vgpu_shadow_bb, list);

	mutex_lock(&dev_priv->drm.struct_mutex);

	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
		if (bb->obj) {
			if (bb->accessing)
				i915_gem_obj_finish_shmem_access(bb->obj);

			if (bb->va && !IS_ERR(bb->va))
				i915_gem_object_unpin_map(bb->obj);

			if (bb->vma && !IS_ERR(bb->vma)) {
				i915_vma_unpin(bb->vma);
				i915_vma_close(bb->vma);
			}
			__i915_gem_object_release_unless_active(bb->obj);
		}
		list_del(&bb->list);
		kfree(bb);
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
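
/* Final per-workload preparation before submission: pin the shadow mm,
 * update the PDPs, flush post-shadow GTT changes, fill the shadow ring
 * buffer and set up the shadow batch buffers and the wa_ctx.
 */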
static int prepare_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ret = 0;

	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret) {
		gvt_vgpu_err("fail to vgpu pin mm\n");
		return ret;
	}

	update_shadow_pdps(workload);

	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to vgpu sync oos pages\n");
		goto err_unpin_mm;
	}

	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to flush post shadow\n");
		goto err_unpin_mm;
	}

	ret = copy_workload_to_ring_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to generate request\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_batch_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
		goto err_shadow_batch;
	}

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto err_shadow_wa_ctx;
	}

	return 0;
err_shadow_wa_ctx:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
	release_shadow_batch_buffer(workload);
err_unpin_mm:
	intel_vgpu_unpin_mm(workload->shadow_mm);
	return ret;
}
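
/* Shadow and prepare a workload, then submit its request to i915. Any error
 * is recorded in workload->status for completion handling.
 */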
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id = workload->ring_id;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	mutex_lock(&vgpu->vgpu_lock);
	mutex_lock(&dev_priv->drm.struct_mutex);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	ret = prepare_workload(workload);

out:
	if (ret)
		workload->status = ret;

	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
				ring_id, workload->req);
		i915_request_add(workload->req);
		workload->dispatched = true;
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
	mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}
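
/* Pick the next workload for @ring_id from the current vGPU's queue, or
 * return NULL if the scheduler has no current vGPU, needs rescheduling or
 * the queue is empty. A still-current workload is returned again so it can
 * be resubmitted.
 */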
static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->sched_lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * still have a current workload, maybe the workload dispatcher
	 * failed to submit it for some reason, resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload
	 * once the current workload is set, the schedule policy routines
	 * will wait until the current workload is finished when trying to
	 * schedule out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
	mutex_unlock(&gvt->sched_lock);
	return workload;
}
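
/* Write the shadow context (engine context pages, ring tail and selected
 * ring context registers) back to the guest context once the workload has
 * completed.
 */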
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct i915_request *rq = workload->req;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
		      workload->ctx_desc.lrca);

	context_page_num = rq->engine->context_size;
	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
					I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				unsigned long engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	unsigned int tmp;

	/* free the unsubmitted workloads in the queues. */
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
			&s->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			intel_vgpu_destroy_workload(pos);
		}
		clear_bit(engine->id, s->shadow_ctx_desc_updated);
	}
}
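
/* Complete the current workload on @ring_id: wait for the shadow context to
 * be scheduled out, propagate the request status, copy the shadow context
 * back to the guest and release the per-workload resources.
 */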
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_request *rq = workload->req;
	int event;

	mutex_lock(&vgpu->vgpu_lock);
	mutex_lock(&gvt->sched_lock);

	/* For a workload with a request, we need to wait for the context
	 * switch to make sure the request is completed.
	 * For a workload without a request, directly complete the workload.
	 */
	if (rq) {
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will be
		 * set to -EIO. Use -EIO as the workload status so that no
		 * context switch interrupt is injected into the guest for a
		 * request that caused a GPU hang.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		if (!workload->status && !(vgpu->resetting_eng &
					   ENGINE_MASK(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}

		/* unpin shadow ctx as the shadow_ctx update is done */
		mutex_lock(&rq->i915->drm.struct_mutex);
		intel_context_unpin(rq->hw_context);
		mutex_unlock(&rq->i915->drm.struct_mutex);

		i915_request_put(fetch_and_zero(&workload->req));
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);

	if (!workload->status) {
		release_shadow_batch_buffer(workload);
		release_shadow_wa_ctx(&workload->wa_ctx);
	}

	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
		/* If workload->status is not successful, the HW GPU hit a
		 * hang or something went wrong with i915/GVT, and GVT won't
		 * inject a context switch interrupt into the guest. So this
		 * error is effectively a vGPU hang from the guest's point of
		 * view, and we should emulate a vGPU hang accordingly. If
		 * there are pending workloads already submitted by the guest,
		 * we should clean them up like the HW GPU would.
		 *
		 * If we are in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up during the reset later, so doing the workload
		 * cleanup here doesn't have any impact.
		 */
		intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
	}

	workload->complete(workload);

	atomic_dec(&s->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->sched_lock);
	mutex_unlock(&vgpu->vgpu_lock);
}
struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};
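
/* Per-engine kernel thread: waits for queued workloads, dispatches them to
 * i915 and completes them once the request has finished.
 */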
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv)
			|| IS_BROXTON(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		ret = dispatch_workload(workload);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);
		if (ret && (vgpu_is_vm_unhealthy(ret)))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
	}
	return 0;
}
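
/* Wait until all workloads submitted by @vgpu have completed. */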
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&s->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
				!atomic_read(&s->running_workload_num));
	}
}
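
/* Tear down the workload scheduler: unregister the context status notifiers
 * and stop the per-engine workload threads.
 */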
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}
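
/* Set up the workload scheduler: create one workload thread per engine and
 * register the shadow context status notifiers.
 */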
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}
/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
	i915_gem_context_put(s->shadow_ctx);
	kmem_cache_destroy(s->workloads);
}
/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
		unsigned long engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	if (!s->active)
		return;

	intel_vgpu_clean_workloads(vgpu, engine_mask);
	s->ops->reset(vgpu, engine_mask);
}
/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	int ret;

	s->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(s->shadow_ctx))
		return PTR_ERR(s->shadow_ctx);

	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
						  sizeof(struct intel_vgpu_workload), 0,
						  SLAB_HWCACHE_ALIGN,
						  offsetof(struct intel_vgpu_workload, rb_tail),
						  sizeof_field(struct intel_vgpu_workload, rb_tail),
						  NULL);

	if (!s->workloads) {
		ret = -ENOMEM;
		goto out_shadow_ctx;
	}

	for_each_engine(engine, vgpu->gvt->dev_priv, i)
		INIT_LIST_HEAD(&s->workload_q_head[i]);

	atomic_set(&s->running_workload_num, 0);
	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

	return 0;

out_shadow_ctx:
	i915_gem_context_put(s->shadow_ctx);
	return ret;
}
/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be configured
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when guest configures submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     unsigned long engine_mask,
				     unsigned int interface)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	const struct intel_vgpu_submission_ops *ops[] = {
		[INTEL_VGPU_EXECLIST_SUBMISSION] =
			&intel_vgpu_execlist_submission_ops,
	};
	int ret;

	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
		return -EINVAL;

	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
		return -EINVAL;

	if (s->active)
		s->ops->clean(vgpu, engine_mask);

	if (interface == 0) {
		s->ops = NULL;
		s->virtual_submission_interface = 0;
		s->active = false;
		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
		return 0;
	}

	ret = ops[interface]->init(vgpu, engine_mask);
	if (ret)
		return ret;

	s->ops = ops[interface];
	s->virtual_submission_interface = interface;
	s->active = true;

	gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
			vgpu->id, s->ops->name);

	return 0;
}
/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: the workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	if (workload->shadow_mm)
		intel_vgpu_mm_put(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);
}
static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
	if (!workload)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->status = -EINPROGRESS;
	workload->vgpu = vgpu;

	return workload;
}
#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}
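
/* Create (or reuse) the shadow PPGTT mm for this workload from the root
 * table type and PDPs described by the guest ring context.
 */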
static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
	intel_gvt_gtt_type_t root_entry_type;
	u64 pdps[GVT_RING_CTX_NR_PDPS];

	switch (desc->addressing_mode) {
	case 1: /* legacy 32-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		break;
	case 3: /* legacy 64-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
		break;
	default:
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);

	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	workload->shadow_mm = mm;
	return 0;
}
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))
/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @ring_id: ring index
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct list_head *q = workload_q_head(vgpu, ring_id);
	struct intel_vgpu_workload *last_workload = NULL;
	struct intel_vgpu_workload *workload = NULL;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return ERR_PTR(-EINVAL);
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_tail.val), &tail, 4);

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	list_for_each_entry_reverse(last_workload, q, list) {

		if (same_context(&last_workload->ctx_desc, desc)) {
			gvt_dbg_el("ring id %d cur workload == last\n",
					ring_id);
			gvt_dbg_el("ctx head %x real head %lx\n", head,
					last_workload->rb_tail);
			/*
			 * cannot use the guest context head pointer here,
			 * as it might not be updated at this time
			 */
			head = last_workload->rb_tail;
			break;
		}
	}

	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	workload = alloc_workload(vgpu);
	if (IS_ERR(workload))
		return workload;

	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;

	if (ring_id == RCS) {
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
		workload->wa_ctx.indirect_ctx.size =
			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
			CACHELINE_BYTES;
		workload->wa_ctx.per_ctx.guest_gma =
			per_ctx & PER_CTX_ADDR_MASK;
		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
	}

	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
			workload, ring_id, head, tail, start, ctl);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(s->workloads, workload);
		return ERR_PTR(ret);
	}

	/* Only scan and shadow the first workload in the queue
	 * as there is only one pre-allocated buf-obj for shadow.
	 */
	if (list_empty(workload_q_head(vgpu, ring_id))) {
		intel_runtime_pm_get(dev_priv);
		mutex_lock(&dev_priv->drm.struct_mutex);
		ret = intel_gvt_scan_and_shadow_workload(workload);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_put(dev_priv);
	}

	if (ret) {
		if (vgpu_is_vm_unhealthy(ret))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
		intel_vgpu_destroy_workload(workload);
		return ERR_PTR(ret);
	}

	return workload;
}
/**
 * intel_vgpu_queue_workload - Queue a vGPU workload
 * @workload: the workload to queue in
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
	list_add_tail(&workload->list,
		workload_q_head(workload->vgpu, workload->ring_id));
	intel_gvt_kick_schedule(workload->vgpu->gvt);
	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}