// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include <linux/circ_buf.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "intel_guc_submission.h"

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: GuC-based command submission
 *
 * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
 * firmware is moving to an updated submission interface and we plan to
 * turn submission back on when that lands. The documentation below (and the
 * related code) matches the old submission model and will be updated as part
 * of the upgrade to the new flow.
 *
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of 1024 such
 * descriptors and shares them with the GuC. Currently, we only use one
 * descriptor. This stage descriptor lets the GuC know about the workqueue and
 * process descriptor. Theoretically, it also lets the GuC know about our HW
 * contexts (context ID, etc...), but we actually employ a kind of submission
 * where the GuC uses the LRCA sent via the work item instead. This is called
 * a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). Firmware writes a success/fail code back to the action register
 * after processing the request. The kernel driver polls waiting for this
 * update and then proceeds.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail pointer
 * and an ELSP context descriptor dword into a Work Item.
 * See guc_add_request().
 */

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
        return rb_entry(rb, struct i915_priolist, node);
}

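/* Index into the stage descriptor pool shared with the GuC. */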
static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
{
        struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;

        return &base[id];
}

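/*
 * The workqueue is allocated in the GGTT and kept permanently mapped in the
 * kernel; guc_wq_item_append() writes work items directly into this buffer.
 */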
static int guc_workqueue_create(struct intel_guc *guc)
{
        return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
                                              &guc->workqueue_vaddr);
}

static void guc_workqueue_destroy(struct intel_guc *guc)
{
        i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
}

/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static int guc_proc_desc_create(struct intel_guc *guc)
{
        const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));

        return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
                                              &guc->proc_desc_vaddr);
}

static void guc_proc_desc_destroy(struct intel_guc *guc)
{
        i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
}

static void guc_proc_desc_init(struct intel_guc *guc)
{
        struct guc_process_desc *desc;

        desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));

        /*
         * XXX: pDoorbell and WQVBaseAddress are pointers in process address
         * space for ring3 clients (set them as in mmap_ioctl) or kernel
         * space for kernel clients (map on demand instead? May make debug
         * easier to have it mapped).
         */
        desc->wq_base_addr = 0;
        desc->db_base_addr = 0;

        desc->wq_size_bytes = GUC_WQ_SIZE;
        desc->wq_status = WQ_STATUS_ACTIVE;
        desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
}

static void guc_proc_desc_fini(struct intel_guc *guc)
{
        memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
}

static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
        u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
                              GUC_MAX_STAGE_DESCRIPTORS);

        return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool,
                                              &guc->stage_desc_pool_vaddr);
}

static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
        i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}

/*
 * Initialise/clear the stage descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures related to work submission (process descriptor, write queue,
 * etc).
 */
static void guc_stage_desc_init(struct intel_guc *guc)
{
        struct guc_stage_desc *desc;

        /* we only use 1 stage desc, so hardcode it to 0 */
        desc = __get_stage_desc(guc, 0);
        memset(desc, 0, sizeof(*desc));

        desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
                          GUC_STAGE_DESC_ATTR_KERNEL;

        desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;

        desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
        desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
        desc->wq_size = GUC_WQ_SIZE;
}

static void guc_stage_desc_fini(struct intel_guc *guc)
{
        struct guc_stage_desc *desc;

        desc = __get_stage_desc(guc, 0);
        memset(desc, 0, sizeof(*desc));
}

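/*
 * Illustrative layout of the 4-dword work item built by guc_wq_item_append()
 * below:
 *
 *   dw0: header (work item type, length and target engine)
 *   dw1: ELSP context descriptor (lower 32 bits)
 *   dw2: submit element info (ring tail, in qwords)
 *   dw3: fence id (the request's fence seqno)
 */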
/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct intel_guc *guc,
                               u32 target_engine, u32 context_desc,
                               u32 ring_tail, u32 fence_id)
{
        /* wqi_len is in DWords, and does not include the one-word header */
        const size_t wqi_size = sizeof(struct guc_wq_item);
        const u32 wqi_len = wqi_size / sizeof(u32) - 1;
        struct guc_process_desc *desc = guc->proc_desc_vaddr;
        struct guc_wq_item *wqi;
        u32 wq_off;

        lockdep_assert_held(&guc->wq_lock);

        /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
         * should not have the case where structure wqi is across page, neither
         * wrapped to the beginning. This simplifies the implementation below.
         *
         * XXX: if that is not the case, we need to save data to a temp wqi and
         * copy it to the workqueue buffer dw by dw.
         */
        BUILD_BUG_ON(wqi_size != 16);

        /* We expect the WQ to be active if we're appending items to it */
        GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);

        /* Free space is guaranteed. */
        wq_off = READ_ONCE(desc->tail);
        GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
                              GUC_WQ_SIZE) < wqi_size);
        GEM_BUG_ON(wq_off & (wqi_size - 1));

        wqi = guc->workqueue_vaddr + wq_off;

        /* Now fill in the 4-word work queue item */
        wqi->header = WQ_TYPE_INORDER |
                      (wqi_len << WQ_LEN_SHIFT) |
                      (target_engine << WQ_TARGET_SHIFT) |
                      WQ_NO_WCFLUSH_WAIT;
        wqi->context_desc = context_desc;
        wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
        GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
        wqi->fence_id = fence_id;

        /* Make the update visible to GuC */
        WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}

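/*
 * Called under guc->wq_lock (from guc_submit()). Note the ring tail is
 * converted from bytes to qwords before being packed into the work item.
 */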
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
        struct intel_engine_cs *engine = rq->engine;
        u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
        u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);

        guc_wq_item_append(guc, engine->guc_id, ctx_desc,
                           ring_tail, rq->fence.seqno);
}

/*
 * When we're doing submissions using regular execlists backend, writing to
 * ELSP from CPU side is enough to make sure that writes to ringbuffer pages
 * pinned in mappable aperture portion of GGTT are visible to command streamer.
 * Writes done by GuC on our behalf are not guaranteeing such ordering,
 * therefore, to ensure the flush, we're issuing a POSTING READ.
 */
static void flush_ggtt_writes(struct i915_vma *vma)
{
        if (i915_vma_is_map_and_fenceable(vma))
                intel_uncore_posting_read_fw(vma->vm->gt->uncore,
                                             GUC_STATUS);
}

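/*
 * Append a batch of requests [out, end) to the GuC workqueue. Called from
 * __guc_dequeue() under the engine's active lock; guc->wq_lock serialises
 * writes to the shared workqueue.
 */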
static void guc_submit(struct intel_engine_cs *engine,
                       struct i915_request **out,
                       struct i915_request **end)
{
        struct intel_guc *guc = &engine->gt->uc.guc;

        spin_lock(&guc->wq_lock);

        do {
                struct i915_request *rq = *out++;

                flush_ggtt_writes(rq->ring->vma);
                guc_add_request(guc, rq);
        } while (out != end);

        spin_unlock(&guc->wq_lock);
}

static inline int rq_prio(const struct i915_request *rq)
{
        return rq->sched.attr.priority | __NO_PREEMPTION;
}

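/*
 * Each request occupying an inflight port holds a GT wakeref and a reference
 * on the request; both are released by schedule_out() when the request leaves
 * the port.
 */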
static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
        trace_i915_request_in(rq, idx);

        /*
         * Currently we are not tracking the rq->context being inflight
         * (ce->inflight = rq->engine). It is only used by the execlists
         * backend at the moment, a similar counting strategy would be
         * required if we generalise the inflight tracking.
         */

        __intel_gt_pm_get(rq->engine->gt);
        return i915_request_get(rq);
}

static void schedule_out(struct i915_request *rq)
{
        trace_i915_request_out(rq);

        intel_gt_pm_put_async(rq->engine->gt);
        i915_request_put(rq);
}

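/*
 * Pull ready requests from the priority tree, coalescing consecutive requests
 * from the same context into a single inflight port, and hand the populated
 * ports over to guc_submit().
 */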
static void __guc_dequeue(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_request **first = execlists->inflight;
        struct i915_request ** const last_port = first + execlists->port_mask;
        struct i915_request *last = first[0];
        struct i915_request **port;
        bool submit = false;
        struct rb_node *rb;

        lockdep_assert_held(&engine->active.lock);

        if (last) {
                if (*++first)
                        return;

                last = NULL;
        }

        /*
         * We write directly into the execlists->inflight queue and don't use
         * the execlists->pending queue, as we don't have a distinct switch
         * event.
         */
        port = first;
        while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
                struct i915_request *rq, *rn;
                int i;

                priolist_for_each_request_consume(rq, rn, p, i) {
                        if (last && rq->context != last->context) {
                                if (port == last_port)
                                        goto done;

                                *port = schedule_in(last,
                                                    port - execlists->inflight);
                                port++;
                        }

                        list_del_init(&rq->sched.link);
                        __i915_request_submit(rq);
                        submit = true;
                        last = rq;
                }

                rb_erase_cached(&p->node, &execlists->queue);
                i915_priolist_free(p);
        }
done:
        execlists->queue_priority_hint =
                rb ? to_priolist(rb)->priority : INT_MIN;
        if (submit) {
                *port = schedule_in(last, port - execlists->inflight);
                *++port = NULL;
                guc_submit(engine, first, port);
        }
        execlists->active = execlists->inflight;
}

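/*
 * Submission bottom half: drop completed requests from the head of the
 * inflight array (releasing their references via schedule_out()), compact the
 * remainder, then feed more work to the GuC via __guc_dequeue().
 */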
static void guc_submission_tasklet(unsigned long data)
{
        struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_request **port, *rq;
        unsigned long flags;

        spin_lock_irqsave(&engine->active.lock, flags);

        for (port = execlists->inflight; (rq = *port); port++) {
                if (!i915_request_completed(rq))
                        break;

                schedule_out(rq);
        }
        if (port != execlists->inflight) {
                int idx = port - execlists->inflight;
                int rem = ARRAY_SIZE(execlists->inflight) - idx;
                memmove(execlists->inflight, port, rem * sizeof(*port));
        }

        __guc_dequeue(engine);

        spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_prepare(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;

        ENGINE_TRACE(engine, "\n");

        /*
         * Prevent request submission to the hardware until we have
         * completed the reset in i915_gem_reset_finish(). If a request
         * is completed by one engine, it may then queue a request
         * to a second via its execlists->tasklet *just* as we are
         * calling engine->init_hw() and also writing the ELSP.
         * Turning off the execlists->tasklet until the reset is over
         * prevents the race.
         */
        __tasklet_disable_sync_once(&execlists->tasklet);
}

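/*
 * Release the references taken by schedule_in() for any requests still in the
 * inflight ports and clear the port tracking.
 */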
static void
cancel_port_requests(struct intel_engine_execlists * const execlists)
{
        struct i915_request * const *port, *rq;

        /* Note we are only using the inflight and not the pending queue */

        for (port = execlists->active; (rq = *port); port++)
                schedule_out(rq);
        execlists->active =
                memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_request *rq;
        unsigned long flags;

        spin_lock_irqsave(&engine->active.lock, flags);

        cancel_port_requests(execlists);

        /* Push back any incomplete requests for replay after the reset. */
        rq = execlists_unwind_incomplete_requests(execlists);
        if (!rq)
                goto out_unlock;

        if (!i915_request_started(rq))
                stalled = false;

        __i915_request_reset(rq, stalled);
        intel_lr_context_reset(engine, rq->context, rq->head, stalled);

out_unlock:
        spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_cancel(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_request *rq, *rn;
        struct rb_node *rb;
        unsigned long flags;

        ENGINE_TRACE(engine, "\n");

        /*
         * Before we call engine->cancel_requests(), we should have exclusive
         * access to the submission state. This is arranged for us by the
         * caller disabling the interrupt generation, the tasklet and other
         * threads that may then access the same state, giving us a free hand
         * to reset state. However, we still need to let lockdep be aware that
         * we know this state may be accessed in hardirq context, so we
         * disable the irq around this manipulation and we want to keep
         * the spinlock focused on its duties and not accidentally conflate
         * coverage to the submission's irq state. (Similarly, although we
         * shouldn't need to disable irq around the manipulation of the
         * submission's irq state, we also wish to remind ourselves that
         * it is irq state.)
         */
        spin_lock_irqsave(&engine->active.lock, flags);

        /* Cancel the requests on the HW and clear the ELSP tracker. */
        cancel_port_requests(execlists);

        /* Mark all executing requests as skipped. */
        list_for_each_entry(rq, &engine->active.requests, sched.link) {
                if (!i915_request_signaled(rq))
                        dma_fence_set_error(&rq->fence, -EIO);

                i915_request_mark_complete(rq);
        }

        /* Flush the queued requests to the timeline list (for retiring). */
        while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
                int i;

                priolist_for_each_request_consume(rq, rn, p, i) {
                        list_del_init(&rq->sched.link);
                        __i915_request_submit(rq);
                        dma_fence_set_error(&rq->fence, -EIO);
                        i915_request_mark_complete(rq);
                }

                rb_erase_cached(&p->node, &execlists->queue);
                i915_priolist_free(p);
        }

        /* Remaining _unready_ requests will be nop'ed when submitted */

        execlists->queue_priority_hint = INT_MIN;
        execlists->queue = RB_ROOT_CACHED;

        spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_finish(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;

        if (__tasklet_enable(&execlists->tasklet))
                /* And kick in case we missed a new request submission. */
                tasklet_hi_schedule(&execlists->tasklet);

        ENGINE_TRACE(engine, "depth->%d\n",
                     atomic_read(&execlists->tasklet.count));
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of guc_submit() above.
 */

/*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
 */
int intel_guc_submission_init(struct intel_guc *guc)
{
        int ret;

        if (guc->stage_desc_pool)
                return 0;

        ret = guc_stage_desc_pool_create(guc);
        if (ret)
                return ret;
        /*
         * Keep static analysers happy, let them know that we allocated the
         * vma after testing that it didn't exist earlier.
         */
        GEM_BUG_ON(!guc->stage_desc_pool);

        ret = guc_workqueue_create(guc);
        if (ret)
                goto err_pool;

        ret = guc_proc_desc_create(guc);
        if (ret)
                goto err_workqueue;

        spin_lock_init(&guc->wq_lock);

        return 0;

err_workqueue:
        guc_workqueue_destroy(guc);
err_pool:
        guc_stage_desc_pool_destroy(guc);
        return ret;
}

void intel_guc_submission_fini(struct intel_guc *guc)
{
        if (guc->stage_desc_pool) {
                guc_proc_desc_destroy(guc);
                guc_workqueue_destroy(guc);
                guc_stage_desc_pool_destroy(guc);
        }
}

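/*
 * While the GuC owns submission the driver does not handle the context switch
 * interrupts itself, so they are masked here and restored again in
 * guc_interrupts_release().
 */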
static void guc_interrupts_capture(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;
        u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
        u32 dmask = irqs << 16 | irqs;

        GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);

        /* Don't handle the ctx switch interrupt in GuC submission mode */
        intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0);
        intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0);
}

static void guc_interrupts_release(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;
        u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
        u32 dmask = irqs << 16 | irqs;

        GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);

        /* Handle ctx switch interrupts again */
        intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask);
        intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
}

static void guc_set_default_submission(struct intel_engine_cs *engine)
{
        /*
         * We inherit a bunch of functions from execlists that we'd like
         * to keep using:
         *
         *    engine->submit_request = execlists_submit_request;
         *    engine->cancel_requests = execlists_cancel_requests;
         *    engine->schedule = execlists_schedule;
         *
         * But we need to override the actual submission backend in order
         * to talk to the GuC.
         */
        intel_execlists_set_default_submission(engine);

        engine->execlists.tasklet.func = guc_submission_tasklet;

        /* do not use execlists park/unpark */
        engine->park = engine->unpark = NULL;

        engine->reset.prepare = guc_reset_prepare;
        engine->reset.rewind = guc_reset_rewind;
        engine->reset.cancel = guc_reset_cancel;
        engine->reset.finish = guc_reset_finish;

        engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
        engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;

        /*
         * For the breadcrumb irq to work we need the interrupts to stay
         * enabled. However, on all platforms on which we'll have support for
         * GuC submission we don't allow disabling the interrupts at runtime,
         * so we're always safe with the current flow.
         */
        GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}

void intel_guc_submission_enable(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        /*
         * We're using GuC work items for submitting work through GuC. Since
         * we're coalescing multiple requests from a single context into a
         * single work item prior to assigning it to execlist_port, we can
         * never have more work items than the total number of ports (for all
         * engines). The GuC firmware is controlling the HEAD of work queue,
         * and it is guaranteed that it will remove the work item from the
         * queue before our request is completed.
         */
        BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
                     sizeof(struct guc_wq_item) *
                     I915_NUM_ENGINES > GUC_WQ_SIZE);

        guc_proc_desc_init(guc);
        guc_stage_desc_init(guc);

        /* Take over from manual control of ELSP (execlists) */
        guc_interrupts_capture(gt);

        for_each_engine(engine, gt, id) {
                engine->set_default_submission = guc_set_default_submission;
                engine->set_default_submission(engine);
        }
}

void intel_guc_submission_disable(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        GEM_BUG_ON(gt->awake); /* GT should be parked first */

        /* Note: By the time we're here, GuC may have already been reset */

        guc_interrupts_release(gt);

        guc_stage_desc_fini(guc);
        guc_proc_desc_fini(guc);
}

static bool __guc_submission_support(struct intel_guc *guc)
{
        /* XXX: GuC submission is unavailable for now */
        return false;

        if (!intel_guc_is_supported(guc))
                return false;

        return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
}

void intel_guc_submission_init_early(struct intel_guc *guc)
{
        guc->submission_supported = __guc_submission_support(guc);
}

bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
{
        return engine->set_default_submission == guc_set_default_submission;
}