// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_reset.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "intel_uc.h"

#include "i915_drv.h"

static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;

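/*
 * uc->ops dispatches through one of these two tables (defined at the bottom
 * of this file): uc_ops_on wires up the full firmware flow when GuC is in
 * use, while uc_ops_off only verifies the hardware state.
 */
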
/*
 * Reset GuC providing us with fresh state for both GuC and HuC.
 */
static int __intel_uc_reset_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int ret;
	u32 guc_status;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_reset_guc(gt);
	if (ret) {
		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}

static void __confirm_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
			     "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
			     i915_modparams.enable_guc,
			     yesno(intel_uc_uses_guc(uc)),
			     yesno(intel_uc_uses_guc_submission(uc)),
			     yesno(intel_uc_uses_huc(uc)));

	if (i915_modparams.enable_guc == -1)
		return;

	if (i915_modparams.enable_guc == 0) {
		GEM_BUG_ON(intel_uc_uses_guc(uc));
		GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
		GEM_BUG_ON(intel_uc_uses_huc(uc));
		return;
	}

	if (!intel_uc_supports_guc(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "GuC is not supported!");

	if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
	    !intel_uc_supports_huc(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "HuC is not supported!");

	if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
	    !intel_uc_supports_guc_submission(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "GuC submission is N/A");

	if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
					  ENABLE_GUC_LOAD_HUC))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "undocumented flag");
}

void intel_uc_init_early(struct intel_uc *uc)
{
	intel_guc_init_early(&uc->guc);
	intel_huc_init_early(&uc->huc);

	__confirm_options(uc);

	if (intel_uc_uses_guc(uc))
		uc->ops = &uc_ops_on;
	else
		uc->ops = &uc_ops_off;
}

void intel_uc_driver_late_release(struct intel_uc *uc)
{
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @uc: the intel_uc structure
 *
 * Setup minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct intel_uc *uc)
{
	intel_guc_init_send_regs(&uc->guc);
}

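/*
 * If firmware load fails we grab an extra reference on the GuC log buffer
 * object so it survives GuC teardown and remains available for post-mortem
 * inspection.
 */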
static void __uc_capture_load_err_log(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (guc->log.vma && !uc->load_err_log)
		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void __uc_free_load_err_log(struct intel_uc *uc)
{
	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);

	if (log)
		i915_gem_object_put(log);
}

static inline bool guc_communication_enabled(struct intel_guc *guc)
{
	return intel_guc_ct_enabled(&guc->ct);
}

/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with guc is turned off at this point, we can save the
 * message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}

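/*
 * Snapshot whatever GuC left in SOFT_SCRATCH(15) while CT was down, masked
 * to the events we actually service, then clear the register. Done under
 * irq_lock so a concurrent notification interrupt cannot race with us.
 */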
static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * clear all events, including the ones we're not currently servicing,
	 * to make sure we don't try to process a stale message if we enable
	 * handling of more events later.
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}

static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(!guc_communication_enabled(guc));

	if (!guc->mmio_msg)
		return;

	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
	spin_unlock_irq(&i915->irq_lock);

	guc->mmio_msg = 0;
}

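/*
 * Thin wrappers over the interrupt vfuncs; assuming the usual intel_guc
 * setup, guc->interrupts.{reset,enable,disable} are installed with
 * generation-specific implementations during intel_guc_init_early().
 */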
static void guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static void guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static void guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	int ret;

	GEM_BUG_ON(guc_communication_enabled(guc));

	ret = i915_inject_probe_error(i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(&i915->irq_lock);
	intel_guc_ct_event_handler(&guc->ct);
	spin_unlock_irq(&i915->irq_lock);

	DRM_INFO("GuC communication enabled\n");

	return 0;
}

static void guc_disable_communication(struct intel_guc *guc)
{
	/*
	 * Events generated during or after CT disable are logged by GuC via
	 * mmio. Make sure the register is clear before disabling CT since
	 * all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	guc_disable_interrupts(guc);

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	DRM_INFO("GuC communication disabled\n");
}

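/*
 * GuC firmware is mandatory on this path (enforced by the BUG_ON below);
 * HuC is optional, so a failed HuC fetch is deliberately ignored and we can
 * still run with GuC alone.
 */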
static void __uc_fetch_firmwares(struct intel_uc *uc)
{
	int err;

	GEM_BUG_ON(!intel_uc_uses_guc(uc));

	err = intel_uc_fw_fetch(&uc->guc.fw);
	if (err)
		return;

	if (intel_uc_uses_huc(uc))
		intel_uc_fw_fetch(&uc->huc.fw);
}

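/* Release the fetched firmware blobs in reverse order of acquisition. */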
static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}

static void __uc_init(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret;

	GEM_BUG_ON(!intel_uc_uses_guc(uc));

	/* XXX: GuC submission is unavailable for now */
	GEM_BUG_ON(intel_uc_supports_guc_submission(uc));

	ret = intel_guc_init(guc);
	if (ret) {
		intel_uc_fw_cleanup_fetch(&huc->fw);
		return;
	}

	if (intel_uc_uses_huc(uc))
		intel_huc_init(huc);
}

static void __uc_fini(struct intel_uc *uc)
{
	intel_huc_fini(&uc->huc);
	intel_guc_fini(&uc->guc);

	__uc_free_load_err_log(uc);
}

static int __uc_sanitize(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	return __intel_uc_reset_hw(uc);
}

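/*
 * WOPCM is the protected memory carve-out that GuC and HuC execute from.
 * The base/size used below come from the partitioning stored in i915->wopcm;
 * presumably because these registers are write-once, the values must be
 * correct before the first firmware load and are verified after writing.
 */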
/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;
	u32 base = intel_wopcm_guc_base(&gt->i915->wopcm);
	u32 size = intel_wopcm_guc_size(&gt->i915->wopcm);
	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
	u32 mask;
	int err;

	if (unlikely(!base || !size)) {
		i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
		return -E2BIG;
	}

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
					    size | GUC_WOPCM_SIZE_LOCKED);
	if (err)
		goto err_out;

	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
					    base | huc_agent, mask,
					    base | huc_agent |
					    GUC_WOPCM_OFFSET_VALID);
	if (err)
		goto err_out;

	return 0;

err_out:
	i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
			 i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
			 intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
			 i915_mmio_reg_offset(GUC_WOPCM_SIZE),
			 intel_uncore_read(uncore, GUC_WOPCM_SIZE));

	return err;
}

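/*
 * The locked/valid bits persist until the next full device reset, so seeing
 * them set is evidence that some agent has already programmed the WOPCM
 * layout (e.g. a previous driver load).
 */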
static bool uc_is_wopcm_locked(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;

	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}

static int __uc_check_hw(struct intel_uc *uc)
{
	if (!intel_uc_supports_guc(uc))
		return 0;

	/*
	 * We can silently continue without GuC only if it was never enabled
	 * before on this system after reboot, otherwise we risk GPU hangs.
	 * To check if GuC was loaded before we look at WOPCM registers.
	 */
	if (uc_is_wopcm_locked(uc))
		return -EIO;

	return 0;
}

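/*
 * Full uC bring-up: partition WOPCM, sanitize to a known state, upload HuC
 * and GuC firmware (with retries on Gen9 per the workarounds below), bring
 * up CT-based communication, authenticate HuC and, where supported, enable
 * GuC submission.
 */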
static int __uc_init_hw(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!intel_uc_uses_guc(uc));

	if (!intel_uc_fw_is_available(&guc->fw)) {
		ret = __uc_check_hw(uc) ||
		      intel_uc_fw_is_overridden(&guc->fw) ||
		      intel_uc_supports_guc_submission(uc) ?
		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
		goto err_out;
	}

	ret = uc_init_wopcm(uc);
	if (ret)
		goto err_out;

	guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN(i915, 9))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_out;

		intel_huc_fw_upload(huc);
		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	intel_huc_auth(huc);

	ret = intel_guc_sample_forcewake(guc);
	if (ret)
		goto err_communication;

	if (intel_uc_supports_guc_submission(uc))
		intel_guc_submission_enable(guc);

	dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
		 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
		 guc->fw.major_ver_found, guc->fw.minor_ver_found,
		 "submission",
		 enableddisabled(intel_uc_supports_guc_submission(uc)));

	if (intel_uc_uses_huc(uc)) {
		dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
			 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
			 huc->fw.path,
			 huc->fw.major_ver_found, huc->fw.minor_ver_found,
			 "authenticated",
			 yesno(intel_huc_is_authenticated(huc)));
	}

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_communication:
	guc_disable_communication(guc);
err_log_capture:
	__uc_capture_load_err_log(uc);
err_out:
	__uc_sanitize(uc);

	if (!ret) {
		dev_notice(i915->drm.dev, "GuC is uninitialized\n");
		/* We want to run without GuC submission */
		return 0;
	}

	i915_probe_error(i915, "GuC initialization failed %d\n", ret);

	/* We want to keep KMS alive */
	return -EIO;
}

static void __uc_fini_hw(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_running(guc))
		return;

	if (intel_uc_supports_guc_submission(uc))
		intel_guc_submission_disable(guc);

	if (guc_communication_enabled(guc))
		guc_disable_communication(guc);

	__uc_sanitize(uc);
}

/**
 * intel_uc_reset_prepare - Prepare for reset
 * @uc: the intel_uc structure
 *
 * Preparing for full gpu reset.
 */
void intel_uc_reset_prepare(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_running(guc))
		return;

	guc_disable_communication(guc);
	__uc_sanitize(uc);
}

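/*
 * Ask GuC to save its state and go idle, then shut down the communication
 * channel. Failures here are only logged: we are on the suspend path, where
 * returning an error would not help.
 */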
void intel_uc_runtime_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	int err;

	if (!intel_guc_is_running(guc))
		return;

	err = intel_guc_suspend(guc);
	if (err)
		DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);

	guc_disable_communication(guc);
}

void intel_uc_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_running(guc))
		return;

	with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
		intel_uc_runtime_suspend(uc);
}

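/*
 * Common resume helper: enable_communication distinguishes runtime resume,
 * where CT must be re-enabled here, from S3/S4 resume, where the HW re-init
 * path has already restored communication (see the callers below).
 */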
static int __uc_resume(struct intel_uc *uc, bool enable_communication)
{
	struct intel_guc *guc = &uc->guc;
	int err;

	if (!intel_guc_is_running(guc))
		return 0;

	/* Make sure we enable communication if and only if it's disabled */
	GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));

	if (enable_communication)
		guc_enable_communication(guc);

	err = intel_guc_resume(guc);
	if (err)
		DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);

	return err;
}

int intel_uc_resume(struct intel_uc *uc)
{
	/*
	 * When coming out of S3/S4 we sanitize and re-init the HW, so
	 * communication is already re-enabled at this point.
	 */
	return __uc_resume(uc, false);
}

int intel_uc_runtime_resume(struct intel_uc *uc)
{
	/*
	 * During runtime resume we don't sanitize, so we need to re-init
	 * communication as well.
	 */
	return __uc_resume(uc, true);
}

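/*
 * With GuC disabled we still want init_hw to run __uc_check_hw(), so we can
 * refuse to continue if a previous GuC load was left behind; the remaining
 * ops stay unset and are presumably skipped by their callers.
 */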
static const struct intel_uc_ops uc_ops_off = {
	.init_hw = __uc_check_hw,
};

static const struct intel_uc_ops uc_ops_on = {
	.sanitize = __uc_sanitize,

	.init_fw = __uc_fetch_firmwares,
	.fini_fw = __uc_cleanup_firmwares,

	.init = __uc_init,
	.fini = __uc_fini,

	.init_hw = __uc_init_hw,
	.fini_hw = __uc_fini_hw,
};