drivers/gpu/drm/i915/intel_engine_cs.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
static const struct engine_info {
	const char *name;
	unsigned exec_id;
	enum intel_engine_hw_id hw_id;
	u32 mmio_base;
	unsigned irq_shift;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);
} intel_engines[] = {
	[RCS] = {
		.name = "render ring",
		.exec_id = I915_EXEC_RENDER,
		.hw_id = RCS_HW,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
	},
	[BCS] = {
		.name = "blitter ring",
		.exec_id = I915_EXEC_BLT,
		.hw_id = BCS_HW,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
	},
	[VCS] = {
		.name = "bsd ring",
		.exec_id = I915_EXEC_BSD,
		.hw_id = VCS_HW,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
	},
	[VCS2] = {
		.name = "bsd2 ring",
		.exec_id = I915_EXEC_BSD,
		.hw_id = VCS2_HW,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd2_ring_buffer,
	},
	[VECS] = {
		.name = "video enhancement ring",
		.exec_id = I915_EXEC_VEBOX,
		.hw_id = VECS_HW,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
	},
};
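/*
 * Note that both BSD entries in the table above advertise I915_EXEC_BSD:
 * userspace picks between VCS and VCS2 with the I915_EXEC_BSD_RING*
 * selector in the execbuffer flags rather than via a distinct exec id.
 */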
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	engine->name = info->name;
	engine->exec_id = info->exec_id;
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = info->mmio_base;
	engine->irq_shift = info->irq_shift;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	dev_priv->engine[id] = engine;
	return 0;
}
/**
 * intel_engines_init() - allocate, populate and init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	unsigned int mask = 0;
	int (*init)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int i;
	int ret;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		if (i915.enable_execlists)
			init = intel_engines[i].init_execlists;
		else
			init = intel_engines[i].init_legacy;

		if (!init)
			continue;

		ret = intel_engine_setup(dev_priv, i);
		if (ret)
			goto cleanup;

		ret = init(dev_priv->engine[i]);
		if (ret)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	device_info->num_rings = hweight32(mask);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (i915.enable_execlists)
			intel_logical_ring_cleanup(engine);
		else
			intel_engine_cleanup(engine);
	}

	return ret;
}
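/*
 * intel_engine_init_global_seqno() is used when the driver must force the
 * engine's hardware seqno to a known value, e.g. across seqno wraparound
 * or after a reset. Callers must ensure no requests are outstanding on
 * the engine (see the GEM_BUG_ON below).
 */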
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore) {
		struct page *page = i915_vma_first_page(dev_priv->semaphore);
		void *semaphores;

		/* Semaphores are in noncoherent memory, flush to be safe */
		semaphores = kmap(page);
		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap(page);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

	GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
	engine->timeline->last_submitted_seqno = seqno;

	engine->hangcheck.seqno = seqno;

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);
}
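/*
 * Each engine is given its own slot in the device-wide global timeline;
 * the per-engine timeline tracks the last request submitted to that engine.
 */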
static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}
/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	engine->execlist_queue = RB_ROOT;
	engine->execlist_first = NULL;

	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);

	intel_engine_init_cmd_parser(engine);
}
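/*
 * The scratch page is the target for PIPE_CONTROL post-sync writes and
 * similar commands that need somewhere safe to write to. Stolen memory
 * is preferred for the backing storage, with an internal object as the
 * fallback.
 */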
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}
static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}
/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	int ret;

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ret = engine->context_pin(engine, engine->i915->kernel_context);
	if (ret)
		return ret;

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin;

	ret = i915_gem_render_state_init(engine);
	if (ret)
		goto err_unpin;

	return 0;

err_unpin:
	engine->context_unpin(engine, engine->i915->kernel_context);
	return ret;
}
/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	i915_gem_render_state_fini(engine);
	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	engine->context_unpin(engine, engine->i915->kernel_context);
}
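/*
 * ACTHD is the engine's "active head": the address from which the command
 * streamer is currently executing. On gen8+ it is a 64bit value split
 * across two registers, hence the 2x32 read.
 */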
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}
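/*
 * BBADDR holds the batch buffer head pointer, i.e. where in the batch the
 * command streamer last fetched from; like ACTHD it grew an upper dword
 * on gen8+.
 */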
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
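/*
 * read_subslice_reg() steers a register read to a specific slice/subslice
 * via GEN8_MCR_SELECTOR. Forcewake and the uncore lock are held across
 * the whole sequence so that the steering selector cannot change under us.
 */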
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}
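/*
 * INSTDONE registers report which functional units of the engine are
 * still busy; their layout varies by generation, hence the switch below.
 * The snapshot is consumed by hangcheck and the error state capture code.
 */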
/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}