Linux 4.19.133
[linux/fpc-iii.git] / drivers / gpu / drm / i915 / selftests / mock_engine.c
blob22a73da45ad58b9bfae36cd823c6a934c4262c49
1 /*
2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
25 #include "mock_engine.h"
26 #include "mock_request.h"
28 struct mock_ring {
29 struct intel_ring base;
30 struct i915_timeline timeline;
33 static struct mock_request *first_request(struct mock_engine *engine)
35 return list_first_entry_or_null(&engine->hw_queue,
36 struct mock_request,
37 link);
40 static void advance(struct mock_engine *engine,
41 struct mock_request *request)
43 list_del_init(&request->link);
44 mock_seqno_advance(&engine->base, request->base.global_seqno);
47 static void hw_delay_complete(struct timer_list *t)
49 struct mock_engine *engine = from_timer(engine, t, hw_delay);
50 struct mock_request *request;
52 spin_lock(&engine->hw_lock);
54 /* Timer fired, first request is complete */
55 request = first_request(engine);
56 if (request)
57 advance(engine, request);
60 * Also immediately signal any subsequent 0-delay requests, but
61 * requeue the timer for the next delayed request.
63 while ((request = first_request(engine))) {
64 if (request->delay) {
65 mod_timer(&engine->hw_delay, jiffies + request->delay);
66 break;
69 advance(engine, request);
72 spin_unlock(&engine->hw_lock);
75 static void mock_context_unpin(struct intel_context *ce)
77 i915_gem_context_put(ce->gem_context);
80 static void mock_context_destroy(struct intel_context *ce)
82 GEM_BUG_ON(ce->pin_count);
85 static const struct intel_context_ops mock_context_ops = {
86 .unpin = mock_context_unpin,
87 .destroy = mock_context_destroy,
90 static struct intel_context *
91 mock_context_pin(struct intel_engine_cs *engine,
92 struct i915_gem_context *ctx)
94 struct intel_context *ce = to_intel_context(ctx, engine);
96 if (!ce->pin_count++) {
97 i915_gem_context_get(ctx);
98 ce->ring = engine->buffer;
99 ce->ops = &mock_context_ops;
102 return ce;
105 static int mock_request_alloc(struct i915_request *request)
107 struct mock_request *mock = container_of(request, typeof(*mock), base);
109 INIT_LIST_HEAD(&mock->link);
110 mock->delay = 0;
112 return 0;
/* There is no hardware to flush, so always report success. */
static int mock_emit_flush(struct i915_request *request,
			   unsigned int flags)
{
	return 0;
}
121 static void mock_emit_breadcrumb(struct i915_request *request,
122 u32 *flags)
126 static void mock_submit_request(struct i915_request *request)
128 struct mock_request *mock = container_of(request, typeof(*mock), base);
129 struct mock_engine *engine =
130 container_of(request->engine, typeof(*engine), base);
132 i915_request_submit(request);
133 GEM_BUG_ON(!request->global_seqno);
135 spin_lock_irq(&engine->hw_lock);
136 list_add_tail(&mock->link, &engine->hw_queue);
137 if (mock->link.prev == &engine->hw_queue) {
138 if (mock->delay)
139 mod_timer(&engine->hw_delay, jiffies + mock->delay);
140 else
141 advance(engine, mock);
143 spin_unlock_irq(&engine->hw_lock);
146 static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
148 const unsigned long sz = PAGE_SIZE / 2;
149 struct mock_ring *ring;
151 BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
153 ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
154 if (!ring)
155 return NULL;
157 i915_timeline_init(engine->i915, &ring->timeline, engine->name);
159 ring->base.size = sz;
160 ring->base.effective_size = sz;
161 ring->base.vaddr = (void *)(ring + 1);
162 ring->base.timeline = &ring->timeline;
164 INIT_LIST_HEAD(&ring->base.request_list);
165 intel_ring_update_space(&ring->base);
167 return &ring->base;
170 static void mock_ring_free(struct intel_ring *base)
172 struct mock_ring *ring = container_of(base, typeof(*ring), base);
174 i915_timeline_fini(&ring->timeline);
175 kfree(ring);
178 struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
179 const char *name,
180 int id)
182 struct mock_engine *engine;
184 GEM_BUG_ON(id >= I915_NUM_ENGINES);
186 engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
187 if (!engine)
188 return NULL;
190 /* minimal engine setup for requests */
191 engine->base.i915 = i915;
192 snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
193 engine->base.id = id;
194 engine->base.status_page.page_addr = (void *)(engine + 1);
196 engine->base.context_pin = mock_context_pin;
197 engine->base.request_alloc = mock_request_alloc;
198 engine->base.emit_flush = mock_emit_flush;
199 engine->base.emit_breadcrumb = mock_emit_breadcrumb;
200 engine->base.submit_request = mock_submit_request;
202 i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
203 lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);
205 intel_engine_init_breadcrumbs(&engine->base);
206 engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
208 /* fake hw queue */
209 spin_lock_init(&engine->hw_lock);
210 timer_setup(&engine->hw_delay, hw_delay_complete, 0);
211 INIT_LIST_HEAD(&engine->hw_queue);
213 engine->base.buffer = mock_ring(&engine->base);
214 if (!engine->base.buffer)
215 goto err_breadcrumbs;
217 if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
218 goto err_ring;
220 return &engine->base;
222 err_ring:
223 mock_ring_free(engine->base.buffer);
224 err_breadcrumbs:
225 intel_engine_fini_breadcrumbs(&engine->base);
226 i915_timeline_fini(&engine->base.timeline);
227 kfree(engine);
228 return NULL;
231 void mock_engine_flush(struct intel_engine_cs *engine)
233 struct mock_engine *mock =
234 container_of(engine, typeof(*mock), base);
235 struct mock_request *request, *rn;
237 del_timer_sync(&mock->hw_delay);
239 spin_lock_irq(&mock->hw_lock);
240 list_for_each_entry_safe(request, rn, &mock->hw_queue, link) {
241 list_del_init(&request->link);
242 mock_seqno_advance(&mock->base, request->base.global_seqno);
244 spin_unlock_irq(&mock->hw_lock);
247 void mock_engine_reset(struct intel_engine_cs *engine)
249 intel_write_status_page(engine, I915_GEM_HWS_INDEX, 0);
252 void mock_engine_free(struct intel_engine_cs *engine)
254 struct mock_engine *mock =
255 container_of(engine, typeof(*mock), base);
256 struct intel_context *ce;
258 GEM_BUG_ON(timer_pending(&mock->hw_delay));
260 ce = fetch_and_zero(&engine->last_retired_context);
261 if (ce)
262 intel_context_unpin(ce);
264 __intel_context_unpin(engine->i915->kernel_context, engine);
266 mock_ring_free(engine->buffer);
268 intel_engine_fini_breadcrumbs(engine);
269 i915_timeline_fini(&engine->timeline);
271 kfree(engine);