/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#include "i915_selftest.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */

enum intel_engine_hangcheck_action {
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

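/*
 * Illustrative sketch, not taken from the driver: the iterator above visits
 * every (slice, subslice) pair present in the device's sseu masks, which is
 * how the per-subslice INSTDONE arrays in struct intel_instdone (declared
 * below) are typically walked when capturing error state. "dev_priv" and
 * "instdone" are assumed to be in scope:
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		DRM_DEBUG_DRIVER("sampler[%d][%d] = 0x%08x\n",
 *				 slice, subslice,
 *				 instdone->sampler[slice][subslice]);
 */
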
struct intel_instdone {
	/* The following exist only in the RCS engine */
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	struct intel_instdone instdone;
	struct drm_i915_gem_request *active_request;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are referred to in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position; also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @elsp: the ExecList Submission Port register
	 */
	u32 __iomem *elsp;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0].
	 * This is called Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];

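	/*
	 * Illustrative sketch, not taken from the driver: the low
	 * EXECLIST_COUNT_BITS of @request_count hold the submission count and
	 * the remaining bits the request pointer, so a resubmission path might
	 * look roughly like this (with "execlists" assumed to be in scope):
	 *
	 *	struct execlist_port *port = execlists->port;
	 *	struct drm_i915_gem_request *rq;
	 *	unsigned int count;
	 *
	 *	rq = port_unpack(port, &count);
	 *	port_set(port, port_pack(rq, count + 1));
	 */
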
	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root queue;

	/**
	 * @first: leftmost level in priority @queue
	 */
	struct rb_node *first;

	/**
	 * @fw_domains: forcewake domains for irq tasklet
	 */
	unsigned int fw_domains;

	/**
	 * @csb_head: context status buffer head
	 */
	unsigned int csb_head;

	/**
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int irq_shift;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct drm_i915_gem_object *default_state;

	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;
		unsigned int irq_enabled;

		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	/**
	 * @enable: Bitmask of enable sample events on this engine.
	 *
	 * Bits correspond to sample event types, for instance
	 * I915_SAMPLE_QUEUED is bit 0 etc.
	 */
	u32 enable;
	/**
	 * @enable_count: Reference count for the enabled samplers.
	 *
	 * Index number corresponds to the bit number from @enable.
	 */
	unsigned int enable_count[I915_PMU_SAMPLE_BITS];
	/**
	 * @sample: Counter values for sampling events.
	 *
	 * Our internal timer stores the current counters in this field.
	 */
#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
	struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */

	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *cs);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	  sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */

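	/*
	 * Worked example of the two mappings above (for illustration only),
	 * taking NUM_RINGS == 5 and seqno_size == 8 bytes:
	 *
	 *	f(RCS, VCS) = (0 * 5 * 8) + (8 * 1) = 0x08
	 *		(RCS row, "signal to VCS" in the first table)
	 *	g(RCS, VCS) = (1 * 5 * 8) + (8 * 0) = 0x28
	 *		(RCS row, "sync from VCS" in the second table)
	 */
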
	struct {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
		struct {
			/* our mbox written by others */
			u32 wait[GEN6_NUM_SEMAPHORES];
			/* mboxes this ring signals to */
			i915_reg_t signal[GEN6_NUM_SEMAPHORES];
		} mbox;

		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or, when they are, the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;
	struct i915_hw_ppgtt *legacy_active_ppgtt;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		spinlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, and active as active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where the engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};

static inline bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline bool intel_engine_supports_stats(struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

void execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

void execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

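/*
 * Illustrative sketch, not taken from the driver: the EXECLISTS_ACTIVE_* bits
 * in struct intel_engine_execlists are tracked with the helpers above, e.g.
 * marking user submissions as in flight while any port holds a request:
 *
 *	execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_USER);
 *	...
 *	if (!port_isset(engine->execlists.port))
 *		execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_USER);
 */
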
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));
}

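/*
 * Sketch of the effect of execlists_port_complete() above, for illustration
 * only: with the default two ports, completing the context in port[0] shifts
 * the remaining port down and clears the vacated slot, i.e.
 *
 *	port[0] <- port[1];
 *	port[1] <- (empty);
 *
 * which is exactly the memmove()/memset() pair in the helper.
 */
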
static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

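/*
 * Illustrative sketch, not taken from the driver: the accessors above are how
 * dwords in the hardware status page are read and written, e.g. peeking at
 * the breadcrumb seqno kept at I915_GEM_HWS_INDEX (defined below) or zeroing
 * the scratch slot:
 *
 *	u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 *
 *	intel_write_status_page(engine, I915_GEM_HWS_SCRATCH_INDEX, 0);
 */
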
/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_PREEMPT_INDEX	0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
				   unsigned int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/*
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}

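/*
 * Illustrative sketch, not taken from the driver: a typical emission sequence
 * reserves a number of dwords with intel_ring_begin(), writes exactly that
 * many, and closes with intel_ring_advance() so that the GEM_BUG_ON() above
 * can check the accounting. The count of 4 is only an example:
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(req, 4);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(req, cs);
 */
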
static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;

	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD;
	 * it may have advanced since, but in the worst case it is still equal
	 * to ring->head, and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_gem_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

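/*
 * Illustrative sketch, not taken from the driver: a waiter is initialised
 * against a request and then revalidated against the request's (volatile)
 * global seqno before being queued on the engine with the functions declared
 * below:
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, rq);
 *	if (intel_wait_update_request(&wait, rq))
 *		intel_engine_add_wait(engine, &wait);
 *	...
 *	intel_engine_remove_wait(engine, &wait);
 */
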
static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

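/*
 * Illustrative sketch, not taken from the driver: gen8_emit_pipe_control()
 * fills a six-dword PIPE_CONTROL and returns the advanced pointer, so calls
 * chain naturally while building a batch. The flag choice here is only an
 * example:
 *
 *	batch = gen8_emit_pipe_control(batch,
 *				       PIPE_CONTROL_CS_STALL |
 *				       PIPE_CONTROL_FLUSH_ENABLE,
 *				       0);
 */
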
static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* w/a: for post-sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}

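/*
 * Illustrative sketch, not taken from the driver: the helpers above are the
 * building blocks for emitting a breadcrumb, i.e. flushing and writing the
 * request's global seqno into the hardware status page:
 *
 *	cs = gen8_emit_ggtt_write(cs, req->global_seqno,
 *				  intel_hws_seqno_address(req->engine));
 */
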
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);

static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
		GEM_BUG_ON(engine->stats.active == 0);
	}

	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		ktime_t last;

		if (engine->stats.active && --engine->stats.active == 0) {
			/*
			 * Decrement the active context count and in case GPU
			 * is now idle add up to the running total.
			 */
			last = ktime_sub(ktime_get(), engine->stats.start);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		} else if (engine->stats.active == 0) {
			/*
			 * After turning on engine stats, context out might be
			 * the first event in which case we account from the
			 * time stats gathering was turned on.
			 */
			last = ktime_sub(ktime_get(), engine->stats.enabled_at);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		}
	}

	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

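/*
 * Illustrative sketch, not taken from the driver: the submission backend
 * brackets the period a context is actually resident on the hardware with
 * the two helpers above, so busy time only accumulates while active > 0:
 *
 *	intel_engine_context_in(engine);	// context-switch-in event
 *	...					// context executes on the GPU
 *	intel_engine_context_out(engine);	// context-switch-out event
 */
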
int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);

#endif /* _INTEL_RINGBUFFER_H_ */