/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <drm/drm_print.h>

#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
/* Haswell does have the CXT_SIZE register, however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
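/* For reference, the arithmetic behind the define above: 66944 bytes at the
 * usual 4096-byte page size is 66944 / 4096 = 16.34 pages, which rounds up
 * to 17 whole pages.
 */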

#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)
struct engine_class_info {
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);
static const struct engine_class_info intel_engine_classes[] = {
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = {
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = {
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = {
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
static const struct engine_info intel_engines[] = {
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,

		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,

		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,

		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,

		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (INTEL_GEN(dev_priv)) {
		MISSING_CASE(INTEL_GEN(dev_priv));
		return GEN10_LR_CONTEXT_RENDER_SIZE;
		return GEN9_LR_CONTEXT_RENDER_SIZE;
		return GEN8_LR_CONTEXT_RENDER_SIZE;
		if (IS_HASWELL(dev_priv))
			return HSW_CXT_TOTAL_SIZE;
		cxt_size = I915_READ(GEN7_CXT_SIZE);
		return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
		cxt_size = I915_READ(CXT_SIZE);
		return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
		/* For the special day when i810 gets merged. */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
		return GEN8_LR_CONTEXT_OTHER_SIZE;
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
	const struct engine_info *info = &intel_engines[id];
	const struct engine_class_info *class_info;
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
	class_info = &intel_engine_classes[info->class];

	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);

	engine->i915 = dev_priv;
	WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
			 class_info->name, info->instance) >=
		sizeof(engine->name));
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = info->mmio_base;
	engine->irq_shift = info->irq_shift;
	engine->class = info->class;
	engine->instance = info->instance;

	engine->uabi_id = info->uabi_id;
	engine->uabi_class = class_info->uabi_class;

	engine->context_size = __intel_engine_context_size(dev_priv,
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	spin_lock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;
/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;

	WARN_ON(ring_mask == 0);
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))

		err = intel_engine_setup(dev_priv, i);

		mask |= ENGINE_MASK(i);

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {

	device_info->num_rings = hweight32(mask);

	i915_check_and_clear_faults(dev_priv);

	for_each_engine(engine, dev_priv, id)
/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (HAS_EXECLISTS(dev_priv))
			init = class_info->init_execlists;
			init = class_info->init_legacy;

		if (GEM_WARN_ON(!init))

		GEM_BUG_ON(!engine->submit_request);

	for_each_engine(engine, dev_priv, id) {
		dev_priv->engine[id] = NULL;
		dev_priv->gt.cleanup_engine(engine);
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
static void intel_engine_init_timeline(struct intel_engine_cs *engine)
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
static bool csb_force_mmio(struct drm_i915_private *i915)
	/*
	 * IOMMU adds unpredictable latency causing the CSB write (from the
	 * GPU into the HWSP) to only be visible some time after the interrupt
	 * (missed breadcrumb syndrome).
	 */
	if (intel_vtd_active())

	/* Older GVT emulation depends upon intercepting CSB mmio */
	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
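/*
 * Set up the software execlists state (submission ports and the priority
 * queue) that is shared by the execlists and GuC submission backends.
 */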
static void intel_engine_init_execlist(struct intel_engine_cs *engine)
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->csb_use_mmio = csb_force_mmio(engine->i915);

	execlists->port_mask = 1;
	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue = RB_ROOT;
	execlists->first = NULL;
/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine@ structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
	intel_engine_init_execlist(engine);
	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);

	intel_engine_init_cmd_parser(engine);
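/*
 * Allocate a scratch buffer object (preferring stolen memory when available)
 * and pin it high in the global GTT. The render engine uses this page as the
 * target for PIPE_CONTROL scratch writes.
 */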
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
		obj = i915_gem_object_create_internal(engine->i915, size);
		DRM_ERROR("Failed to allocate scratch page\n");

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));

	i915_gem_object_put(obj);
static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
	i915_vma_unpin_and_release(&engine->scratch);
static void cleanup_phys_status_page(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
static void cleanup_status_page(struct intel_engine_cs *engine)
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
static int init_status_page(struct intel_engine_cs *engine)
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
		DRM_ERROR("Failed to allocate status page\n");

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);

	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually use it).
		 */
		flags |= PIN_MAPPABLE;

	ret = i915_vma_pin(vma, 0, 4096, flags);

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		ret = PTR_ERR(vaddr);

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));

	i915_gem_object_put(obj);
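/*
 * Platforms flagged as HWS_NEEDS_PHYSICAL cannot address the status page
 * through the GGTT; they are instead given a page from coherent DMA memory
 * whose physical address is programmed directly into the hardware.
 */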
static int init_phys_status_page(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);
/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine@ structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
	struct intel_ring *ring;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ring = engine->context_pin(engine, engine->i915->kernel_context);
		return PTR_ERR(ring);

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
		ring = engine->context_pin(engine,
					   engine->i915->preempt_context);
			goto err_unpin_kernel;

	ret = intel_engine_init_breadcrumbs(engine);
		goto err_unpin_preempt;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ret = init_phys_status_page(engine);
		ret = init_status_page(engine);
		goto err_breadcrumbs;

	intel_engine_fini_breadcrumbs(engine);
	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
		engine->context_unpin(engine, engine->i915->preempt_context);
	engine->context_unpin(engine, engine->i915->kernel_context);
/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
	intel_engine_cleanup_scratch(engine);

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
		cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
		engine->context_unpin(engine, engine->i915->preempt_context);
	engine->context_unpin(engine, engine->i915->kernel_context);
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
		acthd = I915_READ(ACTHD);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
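/*
 * Read a register instance that is banked per slice/subslice: steer the read
 * by programming the slice and subslice selectors in GEN8_MCR_SELECTOR, then
 * clear the selectors again afterwards, as the hardware expects.
 */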
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
	enum forcewake_domains fw_domains;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);
/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,

		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);

		instdone->instdone = I915_READ(GEN2_INSTDONE);
static int wa_add(struct drm_i915_private *dev_priv,
		  const u32 mask, const u32 val)
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;
#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))
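/*
 * Note that the WA_* helpers above do not touch the register immediately:
 * they record an (addr, mask, value) tuple in dev_priv->workarounds, and the
 * accumulated table is emitted later as a single MI_LOAD_REGISTER_IMM block
 * by intel_ring_workarounds_emit() below.
 */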
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))

	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		   i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;
static int gen8_init_workarounds(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
static int bdw_init_workarounds(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	ret = gen8_init_workarounds(engine);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
static int chv_init_workarounds(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	ret = gen8_init_workarounds(engine);

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
static int gen9_init_workarounds(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |

	if (HAS_LLC(dev_priv)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);

		I915_WRITE(MMCD_MISC_CTRL,
			   I915_READ(MMCD_MISC_CTRL) |

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_COFFEELAKE(dev_priv))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
	if (IS_GEN9_LP(dev_priv)) {
		u32 val = I915_READ(GEN8_L3SQCREG1);

		val &= ~L3_PRIO_CREDITS_MASK;
		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
		I915_WRITE(GEN8_L3SQCREG1, val);

	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to safe value. Userspace is
	 * still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };

	for (i = 0; i < 3; i++) {
		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs.
		 */
		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 */
		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
static int skl_init_workarounds(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	ret = gen9_init_workarounds(engine);

	/* WaEnableGapsTsvCreditFix:skl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableGafsUnitClkGating:skl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
			   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);

	return skl_tune_iz_hashing(engine);
static int bxt_init_workarounds(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	ret = gen9_init_workarounds(engine);

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	I915_WRITE(FF_SLICE_CS_CHICKEN2,
		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
static int cnl_init_workarounds(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		I915_WRITE(GAMT_CHKN_BIT_REG,
			   (I915_READ(GAMT_CHKN_BIT_REG) |
			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));

	/* WaForceContextSaveRestoreNonCoherent:cnl */
	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

	/* WaInPlaceDecompressionHang:cnl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaPushConstantDereferenceHoldDisable:cnl */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix: cnl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);

	/* WaDisableEarlyEOT:cnl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
static int kbl_init_workarounds(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	ret = gen9_init_workarounds(engine);

	/* WaEnableGapsTsvCreditFix:kbl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GAMT_CHKN_BIT_REG,
			   (I915_READ(GAMT_CHKN_BIT_REG) |
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));

	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:kbl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
			  GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaDisableLSQCROPERFforOCL:kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
static int glk_init_workarounds(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	ret = gen9_init_workarounds(engine);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
static int cfl_init_workarounds(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	ret = gen9_init_workarounds(engine);

	/* WaEnableGapsTsvCreditFix:cfl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:cfl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
			  GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
int init_workarounds_ring(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	WARN_ON(engine->id != RCS);

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;

	if (IS_BROADWELL(dev_priv))
		err = bdw_init_workarounds(engine);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_init_workarounds(engine);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_init_workarounds(engine);
	else if (IS_BROXTON(dev_priv))
		err = bxt_init_workarounds(engine);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_init_workarounds(engine);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_init_workarounds(engine);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_init_workarounds(engine);
	else if (IS_CANNONLAKE(dev_priv))
		err = cnl_init_workarounds(engine);

	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
			 engine->name, dev_priv->workarounds.count);
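/*
 * Replay the workaround table recorded by init_workarounds_ring() into the
 * ring: a single MI_LOAD_REGISTER_IMM block covering every recorded register,
 * bracketed by flushes so the writes land before any following commands.
 */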
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
	struct i915_workarounds *w = &req->i915->workarounds;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);

	cs = intel_ring_begin(req, (w->count * 2 + 2));

	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
	for (i = 0; i < w->count; i++) {
		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
		*cs++ = w->reg[i].value;

	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
static bool ring_is_idle(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	/* If the whole device is asleep, the engine must be idle */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))

	intel_runtime_pm_put(dev_priv);
/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))

	/* Interrupt/tasklet pending? */
	if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active))

	/* ELSP is empty, but there are ready requests? */
	if (READ_ONCE(engine->execlists.first))

	if (!ring_is_idle(engine))
bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the last context to be executed on this engine (or, if the
 * engine is already idle, the last context that was executed) is the kernel
 * context (#i915.kernel_context).
 */
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
	const struct i915_gem_context * const kernel_context =
		engine->i915->kernel_context;
	struct drm_i915_gem_request *rq;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	/*
	 * Check the last context seen by the engine. If active, it will be
	 * the last request that remains in the timeline. When idle, it is
	 * the last executed context as tracked by retirement.
	 */
	rq = __i915_gem_active_peek(&engine->timeline->last_request);
		return rq->ctx == kernel_context;

	return engine->last_retired_context == kernel_context;
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
			intel_engine_dump(engine, &p, NULL);

		engine->park(engine);

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and now about to fire up with some new user requests.
 */
void intel_engines_unpark(struct drm_i915_private *i915)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		engine->unpark(engine);
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
	switch (INTEL_GEN(engine->i915)) {
		return false; /* uses physical not virtual addresses */
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);
static void print_request(struct drm_printer *m,
			  struct drm_i915_gem_request *rq,
	drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
		   i915_gem_request_completed(rq) ? "!" : "",
		   rq->ctx->hw_id, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;

	for (pos = 0; pos < len; pos += rowsize) {
		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			drm_printf(m, "*\n");

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						false) >= sizeof(line));
		drm_printf(m, "%08zx %s\n", pos, line);
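/*
 * Dump the driver's tracking state and a snapshot of the engine's registers
 * to @m, prefixed by the optional printf-style @header. Intended purely as a
 * debugging aid.
 */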
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *rq;

	va_start(ap, header);
	drm_vprintf(m, header, &ap);

	if (i915_terminally_wedged(&engine->i915->gpu_error))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
		   intel_engine_get_seqno(engine),
		   intel_engine_last_submit(engine),
		   engine->hangcheck.seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
		   engine->timeline->inflight_seqnos);
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline->requests,
			      struct drm_i915_gem_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tfirst ");

	rq = list_last_entry(&engine->timeline->requests,
			     struct drm_i915_gem_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tlast ");

	rq = i915_gem_find_active_request(engine);
		print_request(m, rq, "\t\tactive ");
			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
			   rq->head, rq->postfix, rq->tail,
			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);

	drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
		   I915_READ(RING_START(engine->mmio_base)),
		   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
	drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
		   rq ? rq->ring->head : 0);
	drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
		   rq ? rq->ring->tail : 0);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   I915_READ(RING_MI_MODE(engine->mmio_base)),
			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
		drm_printf(m, "\tSYNC_0: 0x%08x\n",
			   I915_READ(RING_SYNC_0(engine->mmio_base)));
		drm_printf(m, "\tSYNC_1: 0x%08x\n",
			   I915_READ(RING_SYNC_1(engine->mmio_base)));
		if (HAS_VEBOX(dev_priv))
			drm_printf(m, "\tSYNC_2: 0x%08x\n",
				   I915_READ(RING_SYNC_2(engine->mmio_base)));

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
					RING_DMA_FADD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		addr = I915_READ(DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   I915_READ(RING_IPEIR(engine->mmio_base)));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   I915_READ(RING_IPEHR(engine->mmio_base)));
		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));

	if (HAS_EXECLISTS(dev_priv)) {
		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
		u32 ptr, read, write;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		read = GEN8_CSB_READ_PTR(ptr);
		write = GEN8_CSB_WRITE_PTR(ptr);
		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
			   read, execlists->csb_head,
			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
					  &engine->irq_posted)));
		if (read >= GEN8_CSB_ENTRIES)
		if (write >= GEN8_CSB_ENTRIES)
			write += GEN8_CSB_ENTRIES;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),

		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			rq = port_unpack(&execlists->port[idx], &count);
				snprintf(hdr, sizeof(hdr),
					 "\t\tELSP[%d] count=%d, rq: ",
				print_request(m, rq, hdr);
				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));

	spin_lock_irq(&engine->timeline->lock);
	list_for_each_entry(rq, &engine->timeline->requests, link)
		print_request(m, rq, "\t\tE ");
	for (rb = execlists->first; rb; rb = rb_next(rb)) {
		struct i915_priolist *p =
			rb_entry(rb, typeof(*p), node);

		list_for_each_entry(rq, &p->requests, priotree.link)
			print_request(m, rq, "\t\tQ ");
	spin_unlock_irq(&engine->timeline->lock);

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		drm_printf(m, "\t%s [%d] waiting for %x\n",
			   w->tsk->comm, w->tsk->pid, w->seqno);
	spin_unlock_irq(&b->rb_lock);

	if (INTEL_GEN(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));

	drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
		   yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
				  &engine->irq_posted)),
		   yesno(test_bit(ENGINE_IRQ_EXECLIST,
				  &engine->irq_posted)));

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.page_addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
static u8 user_class_map[] = {
	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
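/*
 * Translate a userspace-visible (class, instance) pair into the matching
 * kernel engine, or NULL if no such engine is registered.
 */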
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
	if (class >= ARRAY_SIZE(user_class_map))

	class = user_class_map[class];

	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	if (instance > MAX_ENGINE_INSTANCE)

	return i915->engine_class[class][instance];
/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))

	tasklet_disable(&execlists->tasklet);
	spin_lock_irqsave(&engine->stats.lock, flags);

	if (unlikely(engine->stats.enabled == ~0)) {

	if (engine->stats.enabled++ == 0) {
		const struct execlist_port *port = execlists->port;
		unsigned int num_ports = execlists_num_ports(execlists);

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		while (num_ports-- && port_isset(port)) {
			engine->stats.active++;

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;

	spin_unlock_irqrestore(&engine->stats.lock, flags);
	tasklet_enable(&execlists->tasklet);
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));
/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
	unsigned long flags;

	spin_lock_irqsave(&engine->stats.lock, flags);
	total = __intel_engine_get_busy_time(engine);
	spin_unlock_irqrestore(&engine->stats.lock, flags);
/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))

	spin_lock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	spin_unlock_irqrestore(&engine->stats.lock, flags);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"