/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */
#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_
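/*
 * Per-device workload scheduler state: which vGPU currently owns each
 * engine, the workload running on it, and the per-engine dispatcher
 * threads and wait queues used to run and retire workloads.
 */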
struct intel_gvt_workload_scheduler {
	struct intel_vgpu *current_vgpu;
	struct intel_vgpu *next_vgpu;
	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];

	spinlock_t mmio_context_lock;
	/* can be null when owner is host */
	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];

	wait_queue_head_t workload_complete_wq;
	struct task_struct *thread[I915_NUM_ENGINES];
	wait_queue_head_t waitq[I915_NUM_ENGINES];

	struct intel_gvt_sched_policy_ops *sched_ops;
};
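/*
 * The INDIRECT_CTX field of the ring context packs a 64-byte-aligned
 * graphics memory address in its upper bits and the context size (in
 * cache lines) in its low 6 bits; the masks below split the two fields.
 */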
#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
	struct drm_i915_gem_object *obj;
	unsigned long guest_gma;
	unsigned long shadow_gma;
};
#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
	unsigned long guest_gma;
	unsigned long shadow_gma;
};
struct intel_shadow_wa_ctx {
	struct shadow_indirect_ctx indirect_ctx;
	struct shadow_per_ctx per_ctx;
};
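/*
 * One guest submission as seen by GVT-g: the execlist context descriptor
 * and ring state captured from the guest, plus the shadow copies (ring
 * buffer, batch buffers, workaround contexts) that i915 actually executes.
 */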
struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;
	struct i915_request *req;
	/* if this workload has been dispatched to i915? */
	bool dispatched;
	bool shadow;	/* if workload has done shadow of guest request */

	struct intel_vgpu_mm *shadow_mm;

	/* different submission model may need different handler */
	int (*prepare)(struct intel_vgpu_workload *);
	int (*complete)(struct intel_vgpu_workload *);
	struct list_head list;

	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
	void *shadow_ring_buffer_va;

	/* execlist context information */
	struct execlist_ctx_descriptor_format ctx_desc;
	struct execlist_ring_context *ring_context;
	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
	unsigned long guest_rb_head;
	bool restore_inhibit;
	struct intel_vgpu_elsp_dwords elsp_dwords;
	bool emulate_schedule_in;
	atomic_t shadow_ctx_active;
	wait_queue_head_t shadow_ctx_status_wq;
	u64 ring_context_gpa;

	/* shadow batch buffer */
	struct list_head shadow_bb;
	struct intel_shadow_wa_ctx wa_ctx;
};
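/* A guest batch buffer that has been copied (shadowed) into host memory. */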
struct intel_vgpu_shadow_bb {
	struct list_head list;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *bb_start_cmd_va;
	unsigned int clflush;
	unsigned long bb_offset;
};
#define workload_q_head(vgpu, ring_id) \
	(&(vgpu->submission.workload_q_head[ring_id]))
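/*
 * Sketch of how a per-engine workload queue is looked up (hypothetical
 * caller; ring_id is the engine the guest submitted on):
 *
 *	struct list_head *q = workload_q_head(vgpu, ring_id);
 *
 *	list_add_tail(&workload->list, q);
 */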
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);

int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);

void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask);

void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface);

extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;

struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc);
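/*
 * Typical creation/submission flow (a sketch, error paths trimmed; assumes
 * the execlist context descriptor has already been read from the guest's
 * ELSP writes):
 *
 *	struct intel_vgpu_workload *workload;
 *
 *	workload = intel_vgpu_create_workload(vgpu, ring_id, &desc);
 *	if (IS_ERR(workload))
 *		return PTR_ERR(workload);
 *	intel_vgpu_queue_workload(workload);
 */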
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask);

#endif