/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/circ_buf.h>
#include <trace/events/dma_fence.h>

#include "intel_guc_submission.h"
#include "intel_lrc_reg.h"
#include "i915_drv.h"
#define GUC_PREEMPT_FINISHED		0x1
#define GUC_PREEMPT_BREADCRUMB_DWORDS	0x8
#define GUC_PREEMPT_BREADCRUMB_BYTES \
	(sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
/**
 * DOC: GuC-based command submission
 *
 * GuC client:
 * An intel_guc_client refers to a submission path through GuC. Currently,
 * there are two clients. One of them (the execbuf_client) is charged with all
 * submissions to the GuC, the other one (preempt_client) is responsible for
 * preempting the execbuf_client. This struct is the owner of a doorbell, a
 * process descriptor and a workqueue (all of them inside a single gem object
 * that contains all required pages for these elements).
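 *
 * As a sketch of that object's layout (inferred from guc_client_alloc() and
 * guc_stage_desc_init() below, not an independent description of the GuC
 * ABI): the first GUC_DB_SIZE bytes hold the doorbell cacheline and the
 * process descriptor, and the following GUC_WQ_SIZE bytes hold the work
 * queue.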
 *
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of 1024 such
 * descriptors, and shares them with the GuC.
 * Currently, there exists a 1:1 mapping between an intel_guc_client and a
 * guc_stage_desc (via the client's stage_id), so effectively only one
 * gets used. This stage descriptor lets the GuC know about the doorbell,
 * workqueue and process descriptor. Theoretically, it also lets the GuC
 * know about our HW contexts (context ID, etc...), but we actually
 * employ a kind of submission where the GuC uses the LRCA sent via the work
 * item instead (the single guc_stage_desc associated with the execbuf client
 * contains information about the default kernel context only, but this is
 * essentially unused). This is called a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting at 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). Firmware writes a success/fail code back to the action register
 * after it processes the request. The kernel driver polls waiting for this
 * update and then proceeds.
 * See intel_guc_send()
 *
 * Doorbells:
 * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
 * mapped into process space.
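 * As implemented in guc_ring_doorbell() below, ringing a doorbell amounts to
 * the CPU bumping the cookie value stored in that cacheline.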
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs a ring tail pointer
 * and an ELSP context descriptor dword into each Work Item.
 * See guc_add_request()
 *
 */
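
/*
 * For reference, the four dwords of an INORDER work item as packed by
 * guc_wq_item_append() below (a summary of that code, not an independent
 * description of the GuC ABI):
 *   dw0: header (WQ_TYPE_INORDER, length, target engine, flags)
 *   dw1: context descriptor (lower 32 bits)
 *   dw2: submit element info (ring tail, in qwords)
 *   dw3: fence id (the request's global seqno)
 */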
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}
static inline bool is_high_priority(struct intel_guc_client *client)
{
	return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
		client->priority == GUC_CLIENT_PRIORITY_HIGH);
}
static int reserve_doorbell(struct intel_guc_client *client)
{
	unsigned long offset;
	unsigned long end;
	u16 id;

	GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);

	/*
	 * The bitmap tracks which doorbell registers are currently in use.
	 * It is split into two halves; the first half is used for normal
	 * priority contexts, the second half for high-priority ones.
	 */
	offset = 0;
	end = GUC_NUM_DOORBELLS / 2;
	if (is_high_priority(client)) {
		offset = end;
		end += offset;
	}

	id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
	if (id == end)
		return -ENOSPC;

	__set_bit(id, client->guc->doorbell_bitmap);
	client->doorbell_id = id;
	DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
			 client->stage_id, yesno(is_high_priority(client)),
			 id);
	return 0;
}
static bool has_doorbell(struct intel_guc_client *client)
{
	if (client->doorbell_id == GUC_DOORBELL_INVALID)
		return false;

	return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
}
static void unreserve_doorbell(struct intel_guc_client *client)
{
	GEM_BUG_ON(!has_doorbell(client));

	__clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
	client->doorbell_id = GUC_DOORBELL_INVALID;
}
/*
 * Tell the GuC to allocate or deallocate a specific doorbell
 */

static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
{
	struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;

	return &base[client->stage_id];
}
/*
 * Initialise, update, or clear doorbell data shared with the GuC
 *
 * These functions modify shared data and so need access to the mapped
 * client object which contains the page being used for the doorbell
 */

static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
{
	struct guc_stage_desc *desc;

	/* Update the GuC's idea of the doorbell ID */
	desc = __get_stage_desc(client);
	desc->db_id = new_id;
}
static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
{
	return client->vaddr + client->doorbell_offset;
}
static void __create_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *doorbell;

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_ENABLED;
	doorbell->cookie = 0;
}
static void __destroy_doorbell(struct intel_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
	struct guc_doorbell_info *doorbell;
	u16 db_id = client->doorbell_id;

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_DISABLED;
	doorbell->cookie = 0;

	/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
	 * to go to zero after updating db_status before we call the GuC to
	 * release the doorbell
	 */
	if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
		WARN_ONCE(true, "Doorbell never became invalid after disable\n");
}
static int create_doorbell(struct intel_guc_client *client)
{
	int ret;

	if (WARN_ON(!has_doorbell(client)))
		return -ENODEV; /* internal setup error, should never happen */

	__update_doorbell_desc(client, client->doorbell_id);
	__create_doorbell(client);

	ret = __guc_allocate_doorbell(client->guc, client->stage_id);
	if (ret) {
		__destroy_doorbell(client);
		__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
		DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
				 client->stage_id, ret);
		return ret;
	}

	return 0;
}
static int destroy_doorbell(struct intel_guc_client *client)
{
	int ret;

	GEM_BUG_ON(!has_doorbell(client));

	__destroy_doorbell(client);
	ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
	if (ret)
		DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
			  client->stage_id, ret);

	__update_doorbell_desc(client, GUC_DOORBELL_INVALID);

	return ret;
}
static unsigned long __select_cacheline(struct intel_guc *guc)
{
	unsigned long offset;

	/* Doorbell uses a single cache line within a page */
	offset = offset_in_page(guc->db_cacheline);

	/* Moving to next cache line to reduce contention */
	guc->db_cacheline += cache_line_size();

	DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
			 offset, guc->db_cacheline, cache_line_size());
	return offset;
}
static inline struct guc_process_desc *
__get_process_desc(struct intel_guc_client *client)
{
	return client->vaddr + client->proc_desc_offset;
}
/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static void guc_proc_desc_init(struct intel_guc *guc,
			       struct intel_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = memset(__get_process_desc(client), 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->stage_id = client->stage_id;
	desc->wq_size_bytes = GUC_WQ_SIZE;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = client->priority;
}
static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc,
				     PAGE_ALIGN(sizeof(struct guc_stage_desc) *
				     GUC_MAX_STAGE_DESCRIPTORS));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma);
		return PTR_ERR(vaddr);
	}

	guc->stage_desc_pool = vma;
	guc->stage_desc_pool_vaddr = vaddr;
	ida_init(&guc->stage_ids);

	return 0;
}
static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
	ida_destroy(&guc->stage_ids);
	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
	i915_vma_unpin_and_release(&guc->stage_desc_pool);
}
/*
 * Initialise/clear the stage descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures relating to this client (doorbell, process descriptor,
 * work queue, etc).
 */
static void guc_stage_desc_init(struct intel_guc *guc,
				struct intel_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx = client->owner;
	struct guc_stage_desc *desc;
	unsigned int tmp;
	u32 gfx_addr;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));

	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
			  GUC_STAGE_DESC_ATTR_KERNEL;
	if (is_high_priority(client))
		desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
	desc->stage_id = client->stage_id;
	desc->priority = client->priority;
	desc->db_id = client->doorbell_id;

	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
		struct intel_context *ce = to_intel_context(ctx, engine);
		u32 guc_engine_id = engine->guc_id;
		struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];

		/* TODO: We have a design issue to be solved here. Only when we
		 * receive the first batch do we know which engine is used by
		 * the user. But here GuC expects the lrc and ring to be
		 * pinned. It is not an issue for the default context, which is
		 * the only one that owns a GuC client for now. But a future
		 * owner of a GuC client needs to make sure the lrc is pinned
		 * prior to entering here.
		 */
		if (!ce->state)
			break;	/* XXX: continue? */

		/*
		 * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
		 * submission or, in other words, not using a direct submission
		 * model) the KMD's LRCA is not used for any work submission.
		 * Instead, the GuC uses the LRCA of the user mode context (see
		 * guc_add_request below).
		 */
		lrc->context_desc = lower_32_bits(ce->lrc_desc);

		/* The state page is after PPHWSP */
		lrc->ring_lrca = intel_guc_ggtt_offset(guc, ce->state) +
				 LRC_STATE_PN * PAGE_SIZE;

		/* XXX: In direct submission, the GuC wants the HW context id
		 * here. In proxy submission, it wants the stage id
		 */
		lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
				(guc_engine_id << GUC_ELC_ENGINE_OFFSET);

		lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
		lrc->ring_next_free_location = lrc->ring_begin;
		lrc->ring_current_tail_pointer_value = 0;

		desc->engines_used |= (1 << guc_engine_id);
	}

	DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
			 client->engines, desc->engines_used);
	WARN_ON(desc->engines_used == 0);

	/*
	 * The doorbell, process descriptor, and workqueue are all parts
	 * of the client object, which the GuC will reference via the GGTT
	 */
	gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
	desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
				client->doorbell_offset;
	desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
	desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
	desc->process_desc = gfx_addr + client->proc_desc_offset;
	desc->wq_addr = gfx_addr + GUC_DB_SIZE;
	desc->wq_size = GUC_WQ_SIZE;

	desc->desc_private = ptr_to_u64(client);
}
static void guc_stage_desc_fini(struct intel_guc *guc,
				struct intel_guc_client *client)
{
	struct guc_stage_desc *desc;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));
}
/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct intel_guc_client *client,
			       u32 target_engine, u32 context_desc,
			       u32 ring_tail, u32 fence_id)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
	struct guc_process_desc *desc = __get_process_desc(client);
	struct guc_wq_item *wqi;
	u32 wq_off;

	lockdep_assert_held(&client->wq_lock);

	/* For now a workqueue item is 4 DWs and the workqueue buffer is
	 * 2 pages. So we should never have the case where a wqi straddles a
	 * page boundary, nor wraps to the beginning. This simplifies the
	 * implementation below.
	 *
	 * XXX: if that stops being the case, we would need to build the data
	 * in a temporary wqi and copy it into the workqueue buffer dw by dw.
	 */
	BUILD_BUG_ON(wqi_size != 16);

	/* Free space is guaranteed. */
	wq_off = READ_ONCE(desc->tail);
	GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
		   GUC_WQ_SIZE) < wqi_size);
	GEM_BUG_ON(wq_off & (wqi_size - 1));

	/* WQ starts from the page after doorbell / process_desc */
	wqi = client->vaddr + wq_off + GUC_DB_SIZE;

	/* Now fill in the 4-word work queue item */
	wqi->header = WQ_TYPE_INORDER |
		      (wqi_len << WQ_LEN_SHIFT) |
		      (target_engine << WQ_TARGET_SHIFT) |
		      WQ_NO_WCFLUSH_WAIT;
	wqi->context_desc = context_desc;
	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
	GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
	wqi->fence_id = fence_id;

	/* Make the update visible to GuC */
	WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
static void guc_reset_wq(struct intel_guc_client *client)
{
	struct guc_process_desc *desc = __get_process_desc(client);

	desc->head = 0;
	desc->tail = 0;
}
static void guc_ring_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *db;
	u32 cookie;

	lockdep_assert_held(&client->wq_lock);

	/* pointer of current doorbell cacheline */
	db = __get_doorbell(client);

	/*
	 * We're not expecting the doorbell cookie to change behind our back,
	 * we also need to treat 0 as a reserved value.
	 */
	cookie = READ_ONCE(db->cookie);
	WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);

	/* XXX: doorbell was lost and need to acquire it again */
	GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
	struct intel_guc_client *client = guc->execbuf_client;
	struct intel_engine_cs *engine = rq->engine;
	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);

	spin_lock(&client->wq_lock);

	guc_wq_item_append(client, engine->guc_id, ctx_desc,
			   ring_tail, rq->global_seqno);
	guc_ring_doorbell(client);

	client->submissions[engine->id] += 1;

	spin_unlock(&client->wq_lock);
}
/*
 * When we're doing submissions using the regular execlists backend, writing
 * to ELSP from the CPU side is enough to make sure that writes to ringbuffer
 * pages pinned in the mappable aperture portion of the GGTT are visible to
 * the command streamer. Writes done by the GuC on our behalf do not guarantee
 * such ordering; therefore, to ensure the flush, we issue a POSTING READ.
 */
static void flush_ggtt_writes(struct i915_vma *vma)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;

	if (i915_vma_is_map_and_fenceable(vma))
		POSTING_READ_FW(GUC_STATUS);
}
static void inject_preempt_context(struct work_struct *work)
{
	struct guc_preempt_work *preempt_work =
		container_of(work, typeof(*preempt_work), work);
	struct intel_engine_cs *engine = preempt_work->engine;
	struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
					     preempt_work[engine->id]);
	struct intel_guc_client *client = guc->preempt_client;
	struct guc_stage_desc *stage_desc = __get_stage_desc(client);
	u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
						      engine)->lrc_desc);
	u32 data[7];

	/*
	 * The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP.
	 * See guc_fill_preempt_context().
	 */
	spin_lock_irq(&client->wq_lock);
	guc_wq_item_append(client, engine->guc_id, ctx_desc,
			   GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
	spin_unlock_irq(&client->wq_lock);

	/*
	 * If GuC firmware performs an engine reset while that engine had
	 * a preemption pending, it will set the terminated attribute bit
	 * on our preemption stage descriptor. GuC firmware retains all
	 * pending work items for a high-priority GuC client, unlike the
	 * normal-priority GuC client where work items are dropped. It
	 * wants to make sure the preempt-to-idle work doesn't run when
	 * scheduling resumes, and uses this bit to inform its scheduler
	 * and presumably us as well. Our job is to clear it for the next
	 * preemption after reset, otherwise that and future preemptions
	 * will never complete. We'll just clear it every time.
	 */
	stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;

	data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
	data[1] = client->stage_id;
	data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
		  INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
	data[3] = engine->guc_id;
	data[4] = guc->execbuf_client->priority;
	data[5] = guc->execbuf_client->stage_id;
	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

	if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
		execlists_clear_active(&engine->execlists,
				       EXECLISTS_ACTIVE_PREEMPT);
		tasklet_schedule(&engine->execlists.tasklet);
	}
}
/*
 * We're using a user interrupt and the HWSP value to mark that preemption has
 * finished and the GPU is idle. Normally, we could unwind and continue
 * similarly to the execlists submission path. Unfortunately, with GuC we also
 * need to wait for it to finish its own postprocessing before attempting to
 * submit. Otherwise GuC may silently ignore our submissions, and thus we risk
 * losing a request at best, or executing out-of-order and causing a kernel
 * panic at worst.
 */
#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
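
/*
 * Rough flow, as implemented below: the submission tasklet notices
 * GUC_PREEMPT_FINISHED in the HWSP and calls complete_preempt_context(),
 * which cancels and unwinds the ports; wait_for_guc_preempt_report() then
 * waits for the GuC's preempt-to-idle report before the HWSP flag is cleared
 * and normal dequeuing resumes.
 */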
static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
{
	struct intel_guc *guc = &engine->i915->guc;
	struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
	struct guc_ctx_report *report =
		&data->preempt_ctx_report[engine->guc_id];

	WARN_ON(wait_for_atomic(report->report_return_status ==
				INTEL_GUC_REPORT_STATUS_COMPLETE,
				GUC_PREEMPT_POSTPROCESS_DELAY_MS));
	/*
	 * GuC is expecting that we're also going to clear the affected context
	 * counter, let's also reset the return status to not depend on GuC
	 * resetting it after receiving another preempt action
	 */
	report->affected_count = 0;
	report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
}
static void complete_preempt_context(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;

	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));

	if (inject_preempt_hang(execlists))
		return;

	execlists_cancel_port_requests(execlists);
	execlists_unwind_incomplete_requests(execlists);

	wait_for_guc_preempt_report(engine);
	intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
}
/**
 * guc_submit() - Submit commands through GuC
 * @engine: engine associated with the commands
 *
 * The only error here arises if the doorbell hardware isn't functioning
 * as expected, which really shouldn't happen.
 */
static void guc_submit(struct intel_engine_cs *engine)
{
	struct intel_guc *guc = &engine->i915->guc;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct execlist_port *port = execlists->port;
	unsigned int n;

	for (n = 0; n < execlists_num_ports(execlists); n++) {
		struct i915_request *rq;
		unsigned int count;

		rq = port_unpack(&port[n], &count);
		if (rq && count == 0) {
			port_set(&port[n], port_pack(rq, ++count));

			flush_ggtt_writes(rq->ring->vma);

			guc_add_request(guc, rq);
		}
	}
}
*port
, struct i915_request
*rq
)
672 GEM_BUG_ON(port_isset(port
));
674 port_set(port
, i915_request_get(rq
));
static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority;
}

static inline int port_prio(const struct execlist_port *port)
{
	return rq_prio(port_request(port));
}
static bool __guc_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct execlist_port *port = execlists->port;
	struct i915_request *last = NULL;
	const struct execlist_port * const last_port =
		&execlists->port[execlists->port_mask];
	bool submit = false;
	struct rb_node *rb;

	lockdep_assert_held(&engine->timeline.lock);

	if (port_isset(port)) {
		if (intel_engine_has_preemption(engine)) {
			struct guc_preempt_work *preempt_work =
				&engine->i915->guc.preempt_work[engine->id];
			int prio = execlists->queue_priority;

			if (__execlists_need_preempt(prio, port_prio(port))) {
				execlists_set_active(execlists,
						     EXECLISTS_ACTIVE_PREEMPT);
				queue_work(engine->i915->guc.preempt_wq,
					   &preempt_work->work);
				return false;
			}
		}

		port++;
		if (port_isset(port))
			return false;
	}
	GEM_BUG_ON(port_isset(port));

	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;

		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
			if (last && rq->hw_context != last->hw_context) {
				if (port == last_port) {
					__list_del_many(&p->requests,
							&rq->sched.link);
					goto done;
				}

				if (submit)
					port_assign(port, last);
				port++;
			}

			INIT_LIST_HEAD(&rq->sched.link);

			__i915_request_submit(rq);
			trace_i915_request_in(rq, port_index(port, execlists));

			last = rq;
			submit = true;
		}

		rb_erase_cached(&p->node, &execlists->queue);
		INIT_LIST_HEAD(&p->requests);
		if (p->priority != I915_PRIORITY_NORMAL)
			kmem_cache_free(engine->i915->priorities, p);
	}
done:
	execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
	if (submit)
		port_assign(port, last);
	if (last)
		execlists_user_begin(execlists, execlists->port);

	/* We must always keep the beast fed if we have work piled up */
	GEM_BUG_ON(port_isset(execlists->port) &&
		   !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
	GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
		   !port_isset(execlists->port));

	return submit;
}
static void guc_dequeue(struct intel_engine_cs *engine)
{
	unsigned long flags;
	bool submit;

	local_irq_save(flags);

	spin_lock(&engine->timeline.lock);
	submit = __guc_dequeue(engine);
	spin_unlock(&engine->timeline.lock);

	if (submit)
		guc_submit(engine);

	local_irq_restore(flags);
}
static void guc_submission_tasklet(unsigned long data)
{
	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct execlist_port *port = execlists->port;
	struct i915_request *rq;

	rq = port_request(port);
	while (rq && i915_request_completed(rq)) {
		trace_i915_request_out(rq);
		i915_request_put(rq);

		port = execlists_port_complete(execlists, port);
		if (port_isset(port)) {
			execlists_user_begin(execlists, port);
			rq = port_request(port);
		} else {
			execlists_user_end(execlists);
			rq = NULL;
		}
	}

	if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
	    intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
	    GUC_PREEMPT_FINISHED)
		complete_preempt_context(engine);

	if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
		guc_dequeue(engine);
}
static struct i915_request *
guc_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	GEM_TRACE("%s\n", engine->name);

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset in i915_gem_reset_finish(). If a request
	 * is completed by one engine, it may then queue a request
	 * to a second via its execlists->tasklet *just* as we are
	 * calling engine->init_hw() and also writing the ELSP.
	 * Turning off the execlists->tasklet until the reset is over
	 * prevents the race.
	 */
	__tasklet_disable_sync_once(&execlists->tasklet);

	/*
	 * We're using a worker to queue preemption requests from the tasklet
	 * in GuC submission mode.
	 *
	 * Even though the tasklet was disabled, we may still have a worker
	 * queued. Let's make sure that all workers scheduled before disabling
	 * the tasklet are completed before continuing with the reset.
	 */
	if (engine->i915->guc.preempt_wq)
		flush_workqueue(engine->i915->guc.preempt_wq);

	return i915_gem_find_active_request(engine);
}
/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of guc_submit() above.
 */
/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 drbregl;
	bool valid;

	GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);

	drbregl = I915_READ(GEN8_DRBREGL(db_id));
	valid = drbregl & GEN8_DRB_VALID;

	if (test_bit(db_id, guc->doorbell_bitmap) == valid)
		return true;

	DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
			 db_id, drbregl, yesno(valid));

	return false;
}
static bool guc_verify_doorbells(struct intel_guc *guc)
{
	u16 db_id;

	for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
		if (!doorbell_ok(guc, db_id))
			return false;

	return true;
}
static int guc_clients_doorbell_init(struct intel_guc *guc)
{
	int ret;

	ret = create_doorbell(guc->execbuf_client);
	if (ret)
		return ret;

	if (guc->preempt_client) {
		ret = create_doorbell(guc->preempt_client);
		if (ret) {
			destroy_doorbell(guc->execbuf_client);
			return ret;
		}
	}

	return 0;
}
*guc
)
905 * By the time we're here, GuC has already been reset.
906 * Instead of trying (in vain) to communicate with it, let's just
907 * cleanup the doorbell HW and our internal state.
909 if (guc
->preempt_client
) {
910 __destroy_doorbell(guc
->preempt_client
);
911 __update_doorbell_desc(guc
->preempt_client
,
912 GUC_DOORBELL_INVALID
);
915 if (guc
->execbuf_client
) {
916 __destroy_doorbell(guc
->execbuf_client
);
917 __update_doorbell_desc(guc
->execbuf_client
,
918 GUC_DOORBELL_INVALID
);
/**
 * guc_client_alloc() - Allocate an intel_guc_client
 * @dev_priv:	driver private data structure
 * @engines:	The set of engines to enable for this client
 * @priority:	four priority levels: _CRITICAL, _HIGH, _NORMAL and _LOW.
 *		The kernel client to replace ExecList submission is created
 *		with NORMAL priority. Priority of a client for the scheduler
 *		can be HIGH, while a preemption context can use CRITICAL.
 * @ctx:	the context that owns the client (we use the default render
 *		context)
 *
 * Return:	An intel_guc_client object if success, else an error pointer.
 */
static struct intel_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
		 u32 engines,
		 u32 priority,
		 struct i915_gem_context *ctx)
{
	struct intel_guc_client *client;
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->guc = guc;
	client->owner = ctx;
	client->engines = engines;
	client->priority = priority;
	client->doorbell_id = GUC_DOORBELL_INVALID;
	spin_lock_init(&client->wq_lock);

	ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
			     GFP_KERNEL);
	if (ret < 0)
		goto err_client;

	client->stage_id = ret;

	/* The first page is doorbell/proc_desc. The following two pages are wq. */
	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_id;
	}

	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
	client->vma = vma;

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_vma;
	}
	client->vaddr = vaddr;

	client->doorbell_offset = __select_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	guc_proc_desc_init(guc, client);
	guc_stage_desc_init(guc, client);

	ret = reserve_doorbell(client);
	if (ret)
		goto err_vaddr;

	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
			 priority, client, client->engines, client->stage_id);
	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
			 client->doorbell_id, client->doorbell_offset);

	return client;

err_vaddr:
	i915_gem_object_unpin_map(client->vma->obj);
err_vma:
	i915_vma_unpin_and_release(&client->vma);
err_id:
	ida_simple_remove(&guc->stage_ids, client->stage_id);
err_client:
	kfree(client);
	return ERR_PTR(ret);
}
static void guc_client_free(struct intel_guc_client *client)
{
	unreserve_doorbell(client);
	guc_stage_desc_fini(client->guc, client);
	i915_gem_object_unpin_map(client->vma->obj);
	i915_vma_unpin_and_release(&client->vma);
	ida_simple_remove(&client->guc->stage_ids, client->stage_id);
	kfree(client);
}
static inline bool ctx_save_restore_disabled(struct intel_context *ce)
{
	u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];

#define SR_DISABLED \
	_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
			   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)

	return (sr & SR_DISABLED) == SR_DISABLED;

#undef SR_DISABLED
}
static void guc_fill_preempt_context(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_guc_client *client = guc->preempt_client;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct intel_context *ce =
			to_intel_context(client->owner, engine);
		u32 addr = intel_hws_preempt_done_address(engine);
		u32 *cs;

		GEM_BUG_ON(!ce->pin_count);

		/*
		 * We rely on this context image *not* being saved after
		 * preemption. This ensures that the RING_HEAD / RING_TAIL
		 * remain pointing at initial values forever.
		 */
		GEM_BUG_ON(!ctx_save_restore_disabled(ce));

		cs = ce->ring->vaddr;
		if (id == RCS) {
			cs = gen8_emit_ggtt_write_rcs(cs,
						      GUC_PREEMPT_FINISHED,
						      addr);
		} else {
			cs = gen8_emit_ggtt_write(cs,
						  GUC_PREEMPT_FINISHED,
						  addr);
			*cs++ = MI_NOOP;
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_USER_INTERRUPT;
		*cs++ = MI_NOOP;

		GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
			   GUC_PREEMPT_BREADCRUMB_BYTES);

		flush_ggtt_writes(ce->ring->vma);
	}
}
static int guc_clients_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_guc_client *client;

	GEM_BUG_ON(guc->execbuf_client);
	GEM_BUG_ON(guc->preempt_client);

	client = guc_client_alloc(dev_priv,
				  INTEL_INFO(dev_priv)->ring_mask,
				  GUC_CLIENT_PRIORITY_KMD_NORMAL,
				  dev_priv->kernel_context);
	if (IS_ERR(client)) {
		DRM_ERROR("Failed to create GuC client for submission!\n");
		return PTR_ERR(client);
	}
	guc->execbuf_client = client;

	if (dev_priv->preempt_context) {
		client = guc_client_alloc(dev_priv,
					  INTEL_INFO(dev_priv)->ring_mask,
					  GUC_CLIENT_PRIORITY_KMD_HIGH,
					  dev_priv->preempt_context);
		if (IS_ERR(client)) {
			DRM_ERROR("Failed to create GuC client for preemption!\n");
			guc_client_free(guc->execbuf_client);
			guc->execbuf_client = NULL;
			return PTR_ERR(client);
		}
		guc->preempt_client = client;

		guc_fill_preempt_context(guc);
	}

	return 0;
}
static void guc_clients_destroy(struct intel_guc *guc)
{
	struct intel_guc_client *client;

	client = fetch_and_zero(&guc->preempt_client);
	if (client)
		guc_client_free(client);

	client = fetch_and_zero(&guc->execbuf_client);
	if (client)
		guc_client_free(client);
}
/*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
 */
int intel_guc_submission_init(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	if (guc->stage_desc_pool)
		return 0;

	ret = guc_stage_desc_pool_create(guc);
	if (ret)
		return ret;
	/*
	 * Keep static analysers happy, let them know that we allocated the
	 * vma after testing that it didn't exist earlier.
	 */
	GEM_BUG_ON(!guc->stage_desc_pool);

	WARN_ON(!guc_verify_doorbells(guc));
	ret = guc_clients_create(guc);
	if (ret)
		goto err_pool;

	for_each_engine(engine, dev_priv, id) {
		guc->preempt_work[id].engine = engine;
		INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
	}

	return 0;

err_pool:
	guc_stage_desc_pool_destroy(guc);
	return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		cancel_work_sync(&guc->preempt_work[id].work);

	guc_clients_destroy(guc);
	WARN_ON(!guc_verify_doorbells(guc));

	if (guc->stage_desc_pool)
		guc_stage_desc_pool_destroy(guc);
}
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/* tell all command streamers to forward interrupts (but not vblank)
	 * to GuC
	 */
	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
	I915_WRITE(GUC_WD_VECS_IER, ~irqs);

	/*
	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
	 * (unmasked) PM interrupts to the GuC. All other bits of this
	 * register *disable* generation of a specific interrupt.
	 *
	 * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
	 * writing to the PM interrupt mask register, i.e. interrupts
	 * that must not be disabled.
	 *
	 * If the GuC is handling these interrupts, then we must not let
	 * the PM code disable ANY interrupt that the GuC is expecting.
	 * So for each ENABLED (0) bit in this register, we must SET the
	 * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
	 * GuC needs the ARAT expired interrupt unmasked, hence it is set
	 * in pm_intrmsk_mbz.
	 *
	 * Here we CLEAR the REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which
	 * will result in the register bit being left SET!
	 */
	rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
	rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/*
	 * tell all command streamers NOT to forward interrupts or vblank
	 * to GuC.
	 */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);

	rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
	rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}
static void guc_submission_park(struct intel_engine_cs *engine)
{
	intel_engine_unpin_breadcrumbs_irq(engine);
}

static void guc_submission_unpark(struct intel_engine_cs *engine)
{
	intel_engine_pin_breadcrumbs_irq(engine);
}
static void guc_set_default_submission(struct intel_engine_cs *engine)
{
	/*
	 * We inherit a bunch of functions from execlists that we'd like
	 * to keep using:
	 *
	 *    engine->submit_request = execlists_submit_request;
	 *    engine->cancel_requests = execlists_cancel_requests;
	 *    engine->schedule = execlists_schedule;
	 *
	 * But we need to override the actual submission backend in order
	 * to talk to the GuC.
	 */
	intel_execlists_set_default_submission(engine);

	engine->execlists.tasklet.func = guc_submission_tasklet;

	engine->park = guc_submission_park;
	engine->unpark = guc_submission_unpark;

	engine->reset.prepare = guc_reset_prepare;

	engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
}
int intel_guc_submission_enable(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * We're using GuC work items for submitting work through GuC. Since
	 * we're coalescing multiple requests from a single context into a
	 * single work item prior to assigning it to execlist_port, we can
	 * never have more work items than the total number of ports (for all
	 * engines). The GuC firmware is controlling the HEAD of the work
	 * queue, and it is guaranteed that it will remove the work item from
	 * the queue before our request is completed.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
		     sizeof(struct guc_wq_item) *
		     I915_NUM_ENGINES > GUC_WQ_SIZE);

	GEM_BUG_ON(!guc->execbuf_client);

	guc_reset_wq(guc->execbuf_client);
	if (guc->preempt_client)
		guc_reset_wq(guc->preempt_client);

	err = intel_guc_sample_forcewake(guc);
	if (err)
		return err;

	err = guc_clients_doorbell_init(guc);
	if (err)
		return err;

	/* Take over from manual control of ELSP (execlists) */
	guc_interrupts_capture(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		engine->set_default_submission = guc_set_default_submission;
		engine->set_default_submission(engine);
	}

	return 0;
}
void intel_guc_submission_disable(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */

	guc_interrupts_release(dev_priv);
	guc_clients_doorbell_fini(guc);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_guc.c"
#endif