1 // SPDX-License-Identifier: MIT
3 * Copyright © 2016-2019 Intel Corporation
6 #include <linux/bitfield.h>
7 #include <linux/firmware.h>
8 #include <drm/drm_print.h>
10 #include "intel_uc_fw.h"
11 #include "intel_uc_fw_abi.h"
/*
 * Map a uC firmware object back to its owning GT.
 *
 * The intel_uc_fw struct is embedded inside either gt->uc.guc.fw or
 * gt->uc.huc.fw, so container_of() with the right member recovers the GT.
 * The @type parameter selects which embedding to assume; anything other
 * than GuC/HuC is a programming error (GEM_BUG_ON).
 */
static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
	if (type == INTEL_UC_FW_TYPE_GUC)
		return container_of(uc_fw, struct intel_gt, uc.guc.fw);

	GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}
/*
 * Convenience wrapper around ____uc_fw_to_gt() that reads the type from the
 * fw object itself. Only valid once init_early has set uc_fw->type, hence
 * the check that the fw status is past UNINITIALIZED.
 */
static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}
#ifdef CONFIG_DRM_I915_DEBUG_GUC
/*
 * Debug build: record the new fw state and log every transition.
 * For the SELECTED state the blob path is logged instead of the state name,
 * as it is the more useful piece of information at that point.
 * (Non-debug builds presumably assign __status without logging — the #else
 * branch lives in the header; not visible here.)
 */
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
			       enum intel_uc_fw_status status)
{
	uc_fw->__status = status;
	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
		"%s firmware -> %s\n",
		intel_uc_fw_type_repr(uc_fw->type),
		status == INTEL_UC_FIRMWARE_SELECTED ?
		uc_fw->path : intel_uc_fw_status_repr(status));
}
#endif
/*
 * List of required GuC and HuC binaries per-platform.
 * Must be ordered based on platform + revid, from newer to older.
 *
 * Note that RKL uses the same firmware as TGL.
 */
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
	fw_def(ROCKETLAKE,  0, guc_def(tgl, 49, 0, 1), huc_def(tgl,  7, 5, 0)) \
	fw_def(TIGERLAKE,   0, guc_def(tgl, 49, 0, 1), huc_def(tgl,  7, 5, 0)) \
	fw_def(JASPERLAKE,  0, guc_def(ehl, 49, 0, 1), huc_def(ehl,  9, 0, 0)) \
	fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl,  9, 0, 0)) \
	fw_def(ICELAKE,     0, guc_def(icl, 49, 0, 1), huc_def(icl,  9, 0, 0)) \
	fw_def(COMETLAKE,   5, guc_def(cml, 49, 0, 1), huc_def(cml,  4, 0, 0)) \
	fw_def(COMETLAKE,   0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
	fw_def(COFFEELAKE,  0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
	fw_def(GEMINILAKE,  0, guc_def(glk, 49, 0, 1), huc_def(glk,  4, 0, 0)) \
	fw_def(KABYLAKE,    0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
	fw_def(BROXTON,     0, guc_def(bxt, 49, 0, 1), huc_def(bxt,  2, 0, 0)) \
	fw_def(SKYLAKE,     0, guc_def(skl, 49, 0, 1), huc_def(skl,  2, 0, 0))

/* Build an "i915/<prefix><name><maj>.<min>.<patch>.bin" firmware path string */
#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) name_ \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"

#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)

#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)

/* All blobs need to be declared via MODULE_FIRMWARE() */
#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \
	MODULE_FIRMWARE(guc_); \
	MODULE_FIRMWARE(huc_);

INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH)
/* The below structs and macros are used to iterate across the list of blobs */
struct __packed uc_fw_blob {
	/* NOTE(review): field bodies were not visible in this chunk; these are
	 * the members the UC_FW_BLOB() initializer below requires — confirm. */
	u8 major;		/* blob major version wanted */
	u8 minor;		/* blob minor version wanted */
	const char *path;	/* request_firmware() path */
};

#define UC_FW_BLOB(major_, minor_, path_) \
	{ .major = major_, .minor = minor_, .path = path_ }

#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))

struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES];
};

/* One table entry per platform/rev, holding both the GuC and HuC blob */
#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \
	.blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \
},
/*
 * Pick the firmware blob for this device from the per-platform table.
 *
 * The table is sorted newest-platform-first, so the walk stops as soon as
 * the table entry's platform sorts below the device's (p <= fw_blobs[i].p
 * loop condition). The first entry matching platform and minimum revid wins
 * and fills in path + wanted version.
 *
 * Under CONFIG_DRM_I915_SELFTEST the ordering invariant is re-verified at
 * runtime; a mis-ordered table clears the selected path so the error cannot
 * be silently ignored.
 */
static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	static const struct uc_fw_platform_requirement fw_blobs[] = {
		INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB)
	};
	enum intel_platform p = INTEL_INFO(i915)->platform;
	u8 rev = INTEL_REVID(i915);
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) {
		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
			const struct uc_fw_blob *blob =
					&fw_blobs[i].blobs[uc_fw->type];
			uc_fw->path = blob->path;
			uc_fw->major_ver_wanted = blob->major;
			uc_fw->minor_ver_wanted = blob->minor;
			break;
		}
	}

	/* make sure the list is ordered as expected */
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
		for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) {
			/* strictly descending platform: order is fine */
			if (fw_blobs[i].p < fw_blobs[i - 1].p)
				continue;

			/* same platform, descending revid: also fine */
			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
				continue;

			pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
			       intel_platform_name(fw_blobs[i - 1].p),
			       fw_blobs[i - 1].rev,
			       intel_platform_name(fw_blobs[i].p),
			       fw_blobs[i].rev);

			/* poison the selection so the problem is noticed */
			uc_fw->path = NULL;
		}
	}

	/* We don't want to enable GuC/HuC on pre-Gen11 by default */
	if (i915->params.enable_guc == -1 && p < INTEL_ICELAKE)
		uc_fw->path = NULL;
}
161 static const char *__override_guc_firmware_path(struct drm_i915_private
*i915
)
163 if (i915
->params
.enable_guc
& (ENABLE_GUC_SUBMISSION
|
164 ENABLE_GUC_LOAD_HUC
))
165 return i915
->params
.guc_firmware_path
;
169 static const char *__override_huc_firmware_path(struct drm_i915_private
*i915
)
171 if (i915
->params
.enable_guc
& ENABLE_GUC_LOAD_HUC
)
172 return i915
->params
.huc_firmware_path
;
/*
 * Apply a modparam firmware-path override, if one was given for this fw type.
 *
 * An empty override string ("") disables the firmware; a non-empty one
 * replaces the auto-selected blob. Either way the fw is marked
 * user_overridden so version mismatches are tolerated later in fetch.
 */
static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	const char *path = NULL;

	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_GUC:
		path = __override_guc_firmware_path(i915);
		break;
	case INTEL_UC_FW_TYPE_HUC:
		path = __override_huc_firmware_path(i915);
		break;
	}

	/* NULL means "no override possible"; "" means "user disabled it" */
	if (unlikely(path)) {
		uc_fw->path = path;
		uc_fw->user_overridden = true;
	}
}
/**
 * intel_uc_fw_init_early - initialize the uC object and select the firmware
 * @uc_fw: uC firmware
 * @type: fw type
 *
 * Initialize the state of our uC object and relevant tracking and select the
 * firmware to fetch and load.
 */
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
			    enum intel_uc_fw_type type)
{
	struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;

	/*
	 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
	 * before we're looked at the HW caps to see if we have uc support
	 */
	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
	GEM_BUG_ON(uc_fw->status);
	GEM_BUG_ON(uc_fw->path);

	uc_fw->type = type;

	/* only bother selecting a blob if the HW has a uC at all */
	if (HAS_GT_UC(i915)) {
		__uc_fw_auto_select(i915, uc_fw);
		__uc_fw_user_override(i915, uc_fw);
	}

	/*
	 * path == NULL  -> no uC support on this platform;
	 * path == ""    -> uC present but disabled (no blob / user override);
	 * otherwise     -> a blob was selected.
	 */
	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
				  INTEL_UC_FIRMWARE_SELECTED :
				  INTEL_UC_FIRMWARE_DISABLED :
				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}
/*
 * Fault-injection helper for fetch testing.
 *
 * Each i915_inject_probe_error() call consumes one slot of the injection
 * counter, so on successive probe attempts a *different* branch fires,
 * walking through the failure modes one by one: bad path, too-new major,
 * too-new minor, too-old major, too-old minor, unsupported platform.
 * With e == -EINVAL the failure is additionally marked user_overridden,
 * exercising the "tolerate mismatch on user override" path.
 */
static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	bool user = e == -EINVAL;

	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->major_ver_wanted += 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->minor_ver_wanted += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->major_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->major_ver_wanted -= 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->minor_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->minor_ver_wanted -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->major_ver_wanted = 0;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = true;
	}
}
267 * intel_uc_fw_fetch - fetch uC firmware
268 * @uc_fw: uC firmware
270 * Fetch uC firmware into GEM obj.
272 * Return: 0 on success, a negative errno code on failure.
274 int intel_uc_fw_fetch(struct intel_uc_fw
*uc_fw
)
276 struct drm_i915_private
*i915
= __uc_fw_to_gt(uc_fw
)->i915
;
277 struct device
*dev
= i915
->drm
.dev
;
278 struct drm_i915_gem_object
*obj
;
279 const struct firmware
*fw
= NULL
;
280 struct uc_css_header
*css
;
284 GEM_BUG_ON(!i915
->wopcm
.size
);
285 GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw
));
287 err
= i915_inject_probe_error(i915
, -ENXIO
);
291 __force_fw_fetch_failures(uc_fw
, -EINVAL
);
292 __force_fw_fetch_failures(uc_fw
, -ESTALE
);
294 err
= request_firmware(&fw
, uc_fw
->path
, dev
);
298 /* Check the size of the blob before examining buffer contents */
299 if (unlikely(fw
->size
< sizeof(struct uc_css_header
))) {
300 drm_warn(&i915
->drm
, "%s firmware %s: invalid size: %zu < %zu\n",
301 intel_uc_fw_type_repr(uc_fw
->type
), uc_fw
->path
,
302 fw
->size
, sizeof(struct uc_css_header
));
307 css
= (struct uc_css_header
*)fw
->data
;
309 /* Check integrity of size values inside CSS header */
310 size
= (css
->header_size_dw
- css
->key_size_dw
- css
->modulus_size_dw
-
311 css
->exponent_size_dw
) * sizeof(u32
);
312 if (unlikely(size
!= sizeof(struct uc_css_header
))) {
314 "%s firmware %s: unexpected header size: %zu != %zu\n",
315 intel_uc_fw_type_repr(uc_fw
->type
), uc_fw
->path
,
316 fw
->size
, sizeof(struct uc_css_header
));
321 /* uCode size must calculated from other sizes */
322 uc_fw
->ucode_size
= (css
->size_dw
- css
->header_size_dw
) * sizeof(u32
);
325 if (unlikely(css
->key_size_dw
!= UOS_RSA_SCRATCH_COUNT
)) {
326 drm_warn(&i915
->drm
, "%s firmware %s: unexpected key size: %u != %u\n",
327 intel_uc_fw_type_repr(uc_fw
->type
), uc_fw
->path
,
328 css
->key_size_dw
, UOS_RSA_SCRATCH_COUNT
);
332 uc_fw
->rsa_size
= css
->key_size_dw
* sizeof(u32
);
334 /* At least, it should have header, uCode and RSA. Size of all three. */
335 size
= sizeof(struct uc_css_header
) + uc_fw
->ucode_size
+ uc_fw
->rsa_size
;
336 if (unlikely(fw
->size
< size
)) {
337 drm_warn(&i915
->drm
, "%s firmware %s: invalid size: %zu < %zu\n",
338 intel_uc_fw_type_repr(uc_fw
->type
), uc_fw
->path
,
344 /* Sanity check whether this fw is not larger than whole WOPCM memory */
345 size
= __intel_uc_fw_get_upload_size(uc_fw
);
346 if (unlikely(size
>= i915
->wopcm
.size
)) {
347 drm_warn(&i915
->drm
, "%s firmware %s: invalid size: %zu > %zu\n",
348 intel_uc_fw_type_repr(uc_fw
->type
), uc_fw
->path
,
349 size
, (size_t)i915
->wopcm
.size
);
354 /* Get version numbers from the CSS header */
355 uc_fw
->major_ver_found
= FIELD_GET(CSS_SW_VERSION_UC_MAJOR
,
357 uc_fw
->minor_ver_found
= FIELD_GET(CSS_SW_VERSION_UC_MINOR
,
360 if (uc_fw
->major_ver_found
!= uc_fw
->major_ver_wanted
||
361 uc_fw
->minor_ver_found
< uc_fw
->minor_ver_wanted
) {
362 drm_notice(&i915
->drm
, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
363 intel_uc_fw_type_repr(uc_fw
->type
), uc_fw
->path
,
364 uc_fw
->major_ver_found
, uc_fw
->minor_ver_found
,
365 uc_fw
->major_ver_wanted
, uc_fw
->minor_ver_wanted
);
366 if (!intel_uc_fw_is_overridden(uc_fw
)) {
372 if (uc_fw
->type
== INTEL_UC_FW_TYPE_GUC
)
373 uc_fw
->private_data_size
= css
->private_data_size
;
375 obj
= i915_gem_object_create_shmem_from_data(i915
, fw
->data
, fw
->size
);
382 uc_fw
->size
= fw
->size
;
383 intel_uc_fw_change_status(uc_fw
, INTEL_UC_FIRMWARE_AVAILABLE
);
385 release_firmware(fw
);
389 intel_uc_fw_change_status(uc_fw
, err
== -ENOENT
?
390 INTEL_UC_FIRMWARE_MISSING
:
391 INTEL_UC_FIRMWARE_ERROR
);
393 drm_notice(&i915
->drm
, "%s firmware %s: fetch failed with error %d\n",
394 intel_uc_fw_type_repr(uc_fw
->type
), uc_fw
->path
, err
);
395 drm_info(&i915
->drm
, "%s firmware(s) can be downloaded from %s\n",
396 intel_uc_fw_type_repr(uc_fw
->type
), INTEL_UC_FIRMWARE_URL
);
398 release_firmware(fw
); /* OK even if fw is NULL */
402 static u32
uc_fw_ggtt_offset(struct intel_uc_fw
*uc_fw
)
404 struct i915_ggtt
*ggtt
= __uc_fw_to_gt(uc_fw
)->ggtt
;
405 struct drm_mm_node
*node
= &ggtt
->uc_fw
;
407 GEM_BUG_ON(!drm_mm_node_allocated(node
));
408 GEM_BUG_ON(upper_32_bits(node
->start
));
409 GEM_BUG_ON(upper_32_bits(node
->start
+ node
->size
- 1));
411 return lower_32_bits(node
->start
);
/*
 * Temporarily bind the firmware object into the reserved GGTT range so the
 * DMA engine can read it. A stack "dummy" vma is built by hand (rather than
 * going through the vma machinery) pointing at the reserved node and the
 * object's pages.
 */
static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct i915_vma dummy = {
		.node.start = uc_fw_ggtt_offset(uc_fw),
		.node.size = obj->base.size,
		.pages = obj->mm.pages,
		.vm = &ggtt->vm,
	};

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);

	/* uc_fw->obj cache domains were not controlled across suspend */
	drm_clflush_sg(dummy.pages);

	ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
}
/*
 * Undo uc_fw_bind_ggtt(): clear the PTEs covering the firmware object in
 * the reserved GGTT range once the DMA transfer is done.
 */
static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	u64 start = uc_fw_ggtt_offset(uc_fw);

	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
}
/*
 * Program the hardware DMA engine to copy the firmware (CSS header + uCode,
 * excluding the RSA signature and any other trailing components) from its
 * GGTT address into WOPCM at @dst_offset, then wait for completion.
 *
 * All register accesses use the _fw variants under an explicit
 * FORCEWAKE_ALL bracket. Returns 0 on success or a negative errno
 * (-ETIMEDOUT if the DMA did not complete in time).
 */
static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw_ggtt_offset(uc_fw);
	/* DMA_ADDR_0_HIGH only has 16 usable bits */
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
			intel_uc_fw_type_repr(uc_fw->type),
			intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}
/**
 * intel_uc_fw_upload - load uC firmware using custom loader
 * @uc_fw: uC firmware
 * @dst_offset: destination offset
 * @dma_flags: flags for flags for dma ctrl
 *
 * Loads uC firmware and updates internal flags.
 *
 * Return: 0 on success, non-zero on failure.
 */
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	int err;

	/* make sure the status was cleared the last time we reset the uc */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
	if (err)
		return err;

	if (!intel_uc_fw_is_loadable(uc_fw))
		return -ENOEXEC;

	/* Call custom loader */
	uc_fw_bind_ggtt(uc_fw);
	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
	uc_fw_unbind_ggtt(uc_fw);
	if (err)
		goto fail;

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
	return 0;

fail:
	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 err);
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
	return err;
}
/*
 * Pin the fetched firmware object's backing pages so it can be bound and
 * DMA'd later. Fails with -ENOEXEC if no firmware was made AVAILABLE by a
 * prior fetch; a pin failure marks the fw FAIL.
 */
int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
	int err;

	/* this should happen before the load! */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	if (!intel_uc_fw_is_available(uc_fw))
		return -ENOEXEC;

	err = i915_gem_object_pin_pages(uc_fw->obj);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
	}

	return err;
}
555 void intel_uc_fw_fini(struct intel_uc_fw
*uc_fw
)
557 if (i915_gem_object_has_pinned_pages(uc_fw
->obj
))
558 i915_gem_object_unpin_pages(uc_fw
->obj
);
560 intel_uc_fw_change_status(uc_fw
, INTEL_UC_FIRMWARE_AVAILABLE
);
564 * intel_uc_fw_cleanup_fetch - cleanup uC firmware
565 * @uc_fw: uC firmware
567 * Cleans up uC firmware by releasing the firmware GEM obj.
569 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw
*uc_fw
)
571 if (!intel_uc_fw_is_available(uc_fw
))
574 i915_gem_object_put(fetch_and_zero(&uc_fw
->obj
));
576 intel_uc_fw_change_status(uc_fw
, INTEL_UC_FIRMWARE_SELECTED
);
/**
 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
 *
 * @uc_fw: uC firmware
 * @dst: dst buffer
 * @max_len: max number of bytes to copy
 *
 * Copy the RSA signature, which sits right after the CSS header + uCode
 * in the blob layout, from the firmware object's pages into @dst,
 * truncated to @max_len.
 *
 * Return: number of copied bytes.
 */
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
	struct sg_table *pages = uc_fw->obj->mm.pages;
	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;

	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));

	return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset);
}
/**
 * intel_uc_fw_dump - dump information about uC firmware
 * @uc_fw: uC firmware
 * @p: the &drm_printer
 *
 * Pretty printer for uC firmware.
 */
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
	drm_printf(p, "%s firmware: %s\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
	drm_printf(p, "\tstatus: %s\n",
		   intel_uc_fw_status_repr(uc_fw->status));
	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}