/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include <linux/firmware.h>
27 #include "smu8_fusion.h"
29 #include "cz_smumgr.h"
30 #include "smu_ucode_xfer_cz.h"
31 #include "amdgpu_ucode.h"
33 #include "smu/smu_8_0_d.h"
34 #include "smu/smu_8_0_sh_mask.h"
35 #include "gca/gfx_8_0_d.h"
36 #include "gca/gfx_8_0_sh_mask.h"
38 uint32_t cz_get_argument(struct amdgpu_device
*adev
)
40 return RREG32(mmSMU_MP1_SRBM2P_ARG_0
);
43 static struct cz_smu_private_data
*cz_smu_get_priv(struct amdgpu_device
*adev
)
45 struct cz_smu_private_data
*priv
=
46 (struct cz_smu_private_data
*)(adev
->smu
.priv
);
51 int cz_send_msg_to_smc_async(struct amdgpu_device
*adev
, u16 msg
)
56 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
57 tmp
= REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0
),
58 SMU_MP1_SRBM2P_RESP_0
, CONTENT
);
64 /* timeout means wrong logic*/
65 if (i
== adev
->usec_timeout
)
68 WREG32(mmSMU_MP1_SRBM2P_RESP_0
, 0);
69 WREG32(mmSMU_MP1_SRBM2P_MSG_0
, msg
);
74 int cz_send_msg_to_smc(struct amdgpu_device
*adev
, u16 msg
)
77 u32 content
= 0, tmp
= 0;
79 if (cz_send_msg_to_smc_async(adev
, msg
))
82 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
83 tmp
= REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0
),
84 SMU_MP1_SRBM2P_RESP_0
, CONTENT
);
90 /* timeout means wrong logic*/
91 if (i
== adev
->usec_timeout
)
94 if (PPSMC_Result_OK
!= tmp
) {
95 dev_err(adev
->dev
, "SMC Failed to send Message.\n");
102 int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device
*adev
,
103 u16 msg
, u32 parameter
)
105 WREG32(mmSMU_MP1_SRBM2P_ARG_0
, parameter
);
106 return cz_send_msg_to_smc_async(adev
, msg
);
109 int cz_send_msg_to_smc_with_parameter(struct amdgpu_device
*adev
,
110 u16 msg
, u32 parameter
)
112 WREG32(mmSMU_MP1_SRBM2P_ARG_0
, parameter
);
113 return cz_send_msg_to_smc(adev
, msg
);
116 static int cz_set_smc_sram_address(struct amdgpu_device
*adev
,
117 u32 smc_address
, u32 limit
)
121 if ((smc_address
+ 3) > limit
)
124 WREG32(mmMP0PUB_IND_INDEX_0
, SMN_MP1_SRAM_START_ADDR
+ smc_address
);
129 int cz_read_smc_sram_dword(struct amdgpu_device
*adev
, u32 smc_address
,
130 u32
*value
, u32 limit
)
134 ret
= cz_set_smc_sram_address(adev
, smc_address
, limit
);
138 *value
= RREG32(mmMP0PUB_IND_DATA_0
);
143 int cz_write_smc_sram_dword(struct amdgpu_device
*adev
, u32 smc_address
,
144 u32 value
, u32 limit
)
148 ret
= cz_set_smc_sram_address(adev
, smc_address
, limit
);
152 WREG32(mmMP0PUB_IND_DATA_0
, value
);
157 static int cz_smu_request_load_fw(struct amdgpu_device
*adev
)
159 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
161 uint32_t smc_addr
= SMU8_FIRMWARE_HEADER_LOCATION
+
162 offsetof(struct SMU8_Firmware_Header
, UcodeLoadStatus
);
164 cz_write_smc_sram_dword(adev
, smc_addr
, 0, smc_addr
+ 4);
166 /*prepare toc buffers*/
167 cz_send_msg_to_smc_with_parameter(adev
,
168 PPSMC_MSG_DriverDramAddrHi
,
169 priv
->toc_buffer
.mc_addr_high
);
170 cz_send_msg_to_smc_with_parameter(adev
,
171 PPSMC_MSG_DriverDramAddrLo
,
172 priv
->toc_buffer
.mc_addr_low
);
173 cz_send_msg_to_smc(adev
, PPSMC_MSG_InitJobs
);
176 cz_send_msg_to_smc_with_parameter(adev
,
177 PPSMC_MSG_ExecuteJob
,
178 priv
->toc_entry_aram
);
180 cz_send_msg_to_smc_with_parameter(adev
,
181 PPSMC_MSG_ExecuteJob
,
182 priv
->toc_entry_power_profiling_index
);
184 cz_send_msg_to_smc_with_parameter(adev
,
185 PPSMC_MSG_ExecuteJob
,
186 priv
->toc_entry_initialize_index
);
192 *Check if the FW has been loaded, SMU will not return if loading
195 static int cz_smu_check_fw_load_finish(struct amdgpu_device
*adev
,
199 uint32_t index
= SMN_MP1_SRAM_START_ADDR
+
200 SMU8_FIRMWARE_HEADER_LOCATION
+
201 offsetof(struct SMU8_Firmware_Header
, UcodeLoadStatus
);
203 WREG32(mmMP0PUB_IND_INDEX
, index
);
205 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
206 if (fw_mask
== (RREG32(mmMP0PUB_IND_DATA
) & fw_mask
))
211 if (i
>= adev
->usec_timeout
) {
213 "SMU check loaded firmware failed, expecting 0x%x, getting 0x%x",
214 fw_mask
, RREG32(mmMP0PUB_IND_DATA
));
222 * interfaces for different ip blocks to check firmware loading status
223 * 0 for success otherwise failed
225 static int cz_smu_check_finished(struct amdgpu_device
*adev
,
226 enum AMDGPU_UCODE_ID id
)
229 case AMDGPU_UCODE_ID_SDMA0
:
230 if (adev
->smu
.fw_flags
& AMDGPU_SDMA0_UCODE_LOADED
)
233 case AMDGPU_UCODE_ID_SDMA1
:
234 if (adev
->smu
.fw_flags
& AMDGPU_SDMA1_UCODE_LOADED
)
237 case AMDGPU_UCODE_ID_CP_CE
:
238 if (adev
->smu
.fw_flags
& AMDGPU_CPCE_UCODE_LOADED
)
241 case AMDGPU_UCODE_ID_CP_PFP
:
242 if (adev
->smu
.fw_flags
& AMDGPU_CPPFP_UCODE_LOADED
)
244 case AMDGPU_UCODE_ID_CP_ME
:
245 if (adev
->smu
.fw_flags
& AMDGPU_CPME_UCODE_LOADED
)
248 case AMDGPU_UCODE_ID_CP_MEC1
:
249 if (adev
->smu
.fw_flags
& AMDGPU_CPMEC1_UCODE_LOADED
)
252 case AMDGPU_UCODE_ID_CP_MEC2
:
253 if (adev
->smu
.fw_flags
& AMDGPU_CPMEC2_UCODE_LOADED
)
256 case AMDGPU_UCODE_ID_RLC_G
:
257 if (adev
->smu
.fw_flags
& AMDGPU_CPRLC_UCODE_LOADED
)
260 case AMDGPU_UCODE_ID_MAXIMUM
:
268 static int cz_load_mec_firmware(struct amdgpu_device
*adev
)
270 struct amdgpu_firmware_info
*ucode
=
271 &adev
->firmware
.ucode
[AMDGPU_UCODE_ID_CP_MEC1
];
275 if (ucode
->fw
== NULL
)
278 /* Disable MEC parsing/prefetching */
279 tmp
= RREG32(mmCP_MEC_CNTL
);
280 tmp
= REG_SET_FIELD(tmp
, CP_MEC_CNTL
, MEC_ME1_HALT
, 1);
281 tmp
= REG_SET_FIELD(tmp
, CP_MEC_CNTL
, MEC_ME2_HALT
, 1);
282 WREG32(mmCP_MEC_CNTL
, tmp
);
284 tmp
= RREG32(mmCP_CPC_IC_BASE_CNTL
);
285 tmp
= REG_SET_FIELD(tmp
, CP_CPC_IC_BASE_CNTL
, VMID
, 0);
286 tmp
= REG_SET_FIELD(tmp
, CP_CPC_IC_BASE_CNTL
, ATC
, 0);
287 tmp
= REG_SET_FIELD(tmp
, CP_CPC_IC_BASE_CNTL
, CACHE_POLICY
, 0);
288 tmp
= REG_SET_FIELD(tmp
, CP_CPC_IC_BASE_CNTL
, MTYPE
, 1);
289 WREG32(mmCP_CPC_IC_BASE_CNTL
, tmp
);
291 reg_data
= lower_32_bits(ucode
->mc_addr
) &
292 REG_FIELD_MASK(CP_CPC_IC_BASE_LO
, IC_BASE_LO
);
293 WREG32(mmCP_CPC_IC_BASE_LO
, reg_data
);
295 reg_data
= upper_32_bits(ucode
->mc_addr
) &
296 REG_FIELD_MASK(CP_CPC_IC_BASE_HI
, IC_BASE_HI
);
297 WREG32(mmCP_CPC_IC_BASE_HI
, reg_data
);
302 int cz_smu_start(struct amdgpu_device
*adev
)
306 uint32_t fw_to_check
= UCODE_ID_RLC_G_MASK
|
307 UCODE_ID_SDMA0_MASK
|
308 UCODE_ID_SDMA1_MASK
|
309 UCODE_ID_CP_CE_MASK
|
310 UCODE_ID_CP_ME_MASK
|
311 UCODE_ID_CP_PFP_MASK
|
312 UCODE_ID_CP_MEC_JT1_MASK
|
313 UCODE_ID_CP_MEC_JT2_MASK
;
315 if (adev
->asic_type
== CHIP_STONEY
)
316 fw_to_check
&= ~(UCODE_ID_SDMA1_MASK
| UCODE_ID_CP_MEC_JT2_MASK
);
318 cz_smu_request_load_fw(adev
);
319 ret
= cz_smu_check_fw_load_finish(adev
, fw_to_check
);
323 /* manually load MEC firmware for CZ */
324 if (adev
->asic_type
== CHIP_CARRIZO
|| adev
->asic_type
== CHIP_STONEY
) {
325 ret
= cz_load_mec_firmware(adev
);
327 dev_err(adev
->dev
, "(%d) Mec Firmware load failed\n", ret
);
332 /* setup fw load flag */
333 adev
->smu
.fw_flags
= AMDGPU_SDMA0_UCODE_LOADED
|
334 AMDGPU_SDMA1_UCODE_LOADED
|
335 AMDGPU_CPCE_UCODE_LOADED
|
336 AMDGPU_CPPFP_UCODE_LOADED
|
337 AMDGPU_CPME_UCODE_LOADED
|
338 AMDGPU_CPMEC1_UCODE_LOADED
|
339 AMDGPU_CPMEC2_UCODE_LOADED
|
340 AMDGPU_CPRLC_UCODE_LOADED
;
342 if (adev
->asic_type
== CHIP_STONEY
)
343 adev
->smu
.fw_flags
&= ~(AMDGPU_SDMA1_UCODE_LOADED
| AMDGPU_CPMEC2_UCODE_LOADED
);
348 static uint32_t cz_convert_fw_type(uint32_t fw_type
)
350 enum AMDGPU_UCODE_ID result
= AMDGPU_UCODE_ID_MAXIMUM
;
354 result
= AMDGPU_UCODE_ID_SDMA0
;
357 result
= AMDGPU_UCODE_ID_SDMA1
;
360 result
= AMDGPU_UCODE_ID_CP_CE
;
362 case UCODE_ID_CP_PFP
:
363 result
= AMDGPU_UCODE_ID_CP_PFP
;
366 result
= AMDGPU_UCODE_ID_CP_ME
;
368 case UCODE_ID_CP_MEC_JT1
:
369 case UCODE_ID_CP_MEC_JT2
:
370 result
= AMDGPU_UCODE_ID_CP_MEC1
;
373 result
= AMDGPU_UCODE_ID_RLC_G
;
376 DRM_ERROR("UCode type is out of range!");
382 static uint8_t cz_smu_translate_firmware_enum_to_arg(
383 enum cz_scratch_entry firmware_enum
)
387 switch (firmware_enum
) {
388 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0
:
389 ret
= UCODE_ID_SDMA0
;
391 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1
:
392 ret
= UCODE_ID_SDMA1
;
394 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE
:
395 ret
= UCODE_ID_CP_CE
;
397 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP
:
398 ret
= UCODE_ID_CP_PFP
;
400 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME
:
401 ret
= UCODE_ID_CP_ME
;
403 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
:
404 ret
= UCODE_ID_CP_MEC_JT1
;
406 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2
:
407 ret
= UCODE_ID_CP_MEC_JT2
;
409 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG
:
410 ret
= UCODE_ID_GMCON_RENG
;
412 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G
:
413 ret
= UCODE_ID_RLC_G
;
415 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH
:
416 ret
= UCODE_ID_RLC_SCRATCH
;
418 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM
:
419 ret
= UCODE_ID_RLC_SRM_ARAM
;
421 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM
:
422 ret
= UCODE_ID_RLC_SRM_DRAM
;
424 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM
:
425 ret
= UCODE_ID_DMCU_ERAM
;
427 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM
:
428 ret
= UCODE_ID_DMCU_IRAM
;
430 case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING
:
431 ret
= TASK_ARG_INIT_MM_PWR_LOG
;
433 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT
:
434 case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING
:
435 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS
:
436 case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT
:
437 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START
:
438 case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS
:
439 ret
= TASK_ARG_REG_MMIO
;
441 case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
:
442 ret
= TASK_ARG_INIT_CLK_TABLE
;
449 static int cz_smu_populate_single_firmware_entry(struct amdgpu_device
*adev
,
450 enum cz_scratch_entry firmware_enum
,
451 struct cz_buffer_entry
*entry
)
455 uint8_t ucode_id
= cz_smu_translate_firmware_enum_to_arg(firmware_enum
);
456 enum AMDGPU_UCODE_ID id
= cz_convert_fw_type(ucode_id
);
457 struct amdgpu_firmware_info
*ucode
= &adev
->firmware
.ucode
[id
];
458 const struct gfx_firmware_header_v1_0
*header
;
460 if (ucode
->fw
== NULL
)
463 gpu_addr
= ucode
->mc_addr
;
464 header
= (const struct gfx_firmware_header_v1_0
*)ucode
->fw
->data
;
465 data_size
= le32_to_cpu(header
->header
.ucode_size_bytes
);
467 if ((firmware_enum
== CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
) ||
468 (firmware_enum
== CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2
)) {
469 gpu_addr
+= le32_to_cpu(header
->jt_offset
) << 2;
470 data_size
= le32_to_cpu(header
->jt_size
) << 2;
473 entry
->mc_addr_low
= lower_32_bits(gpu_addr
);
474 entry
->mc_addr_high
= upper_32_bits(gpu_addr
);
475 entry
->data_size
= data_size
;
476 entry
->firmware_ID
= firmware_enum
;
481 static int cz_smu_populate_single_scratch_entry(struct amdgpu_device
*adev
,
482 enum cz_scratch_entry scratch_type
,
483 uint32_t size_in_byte
,
484 struct cz_buffer_entry
*entry
)
486 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
487 uint64_t mc_addr
= (((uint64_t) priv
->smu_buffer
.mc_addr_high
) << 32) |
488 priv
->smu_buffer
.mc_addr_low
;
489 mc_addr
+= size_in_byte
;
491 priv
->smu_buffer_used_bytes
+= size_in_byte
;
492 entry
->data_size
= size_in_byte
;
493 entry
->kaddr
= priv
->smu_buffer
.kaddr
+ priv
->smu_buffer_used_bytes
;
494 entry
->mc_addr_low
= lower_32_bits(mc_addr
);
495 entry
->mc_addr_high
= upper_32_bits(mc_addr
);
496 entry
->firmware_ID
= scratch_type
;
501 static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device
*adev
,
502 enum cz_scratch_entry firmware_enum
,
506 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
507 struct TOC
*toc
= (struct TOC
*)priv
->toc_buffer
.kaddr
;
508 struct SMU_Task
*task
= &toc
->tasks
[priv
->toc_entry_used_count
++];
510 task
->type
= TASK_TYPE_UCODE_LOAD
;
511 task
->arg
= cz_smu_translate_firmware_enum_to_arg(firmware_enum
);
512 task
->next
= is_last
? END_OF_TASK_LIST
: priv
->toc_entry_used_count
;
514 for (i
= 0; i
< priv
->driver_buffer_length
; i
++)
515 if (priv
->driver_buffer
[i
].firmware_ID
== firmware_enum
)
518 if (i
>= priv
->driver_buffer_length
) {
519 dev_err(adev
->dev
, "Invalid Firmware Type\n");
523 task
->addr
.low
= priv
->driver_buffer
[i
].mc_addr_low
;
524 task
->addr
.high
= priv
->driver_buffer
[i
].mc_addr_high
;
525 task
->size_bytes
= priv
->driver_buffer
[i
].data_size
;
530 static int cz_smu_populate_single_scratch_task(struct amdgpu_device
*adev
,
531 enum cz_scratch_entry firmware_enum
,
532 uint8_t type
, bool is_last
)
535 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
536 struct TOC
*toc
= (struct TOC
*)priv
->toc_buffer
.kaddr
;
537 struct SMU_Task
*task
= &toc
->tasks
[priv
->toc_entry_used_count
++];
540 task
->arg
= cz_smu_translate_firmware_enum_to_arg(firmware_enum
);
541 task
->next
= is_last
? END_OF_TASK_LIST
: priv
->toc_entry_used_count
;
543 for (i
= 0; i
< priv
->scratch_buffer_length
; i
++)
544 if (priv
->scratch_buffer
[i
].firmware_ID
== firmware_enum
)
547 if (i
>= priv
->scratch_buffer_length
) {
548 dev_err(adev
->dev
, "Invalid Firmware Type\n");
552 task
->addr
.low
= priv
->scratch_buffer
[i
].mc_addr_low
;
553 task
->addr
.high
= priv
->scratch_buffer
[i
].mc_addr_high
;
554 task
->size_bytes
= priv
->scratch_buffer
[i
].data_size
;
556 if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS
== firmware_enum
) {
557 struct cz_ih_meta_data
*pIHReg_restore
=
558 (struct cz_ih_meta_data
*)priv
->scratch_buffer
[i
].kaddr
;
559 pIHReg_restore
->command
=
560 METADATA_CMD_MODE0
| METADATA_PERFORM_ON_LOAD
;
566 static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device
*adev
)
568 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
569 priv
->toc_entry_aram
= priv
->toc_entry_used_count
;
570 cz_smu_populate_single_scratch_task(adev
,
571 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM
,
572 TASK_TYPE_UCODE_SAVE
, true);
577 static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device
*adev
)
579 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
580 struct TOC
*toc
= (struct TOC
*)priv
->toc_buffer
.kaddr
;
582 toc
->JobList
[JOB_GFX_SAVE
] = (uint8_t)priv
->toc_entry_used_count
;
583 cz_smu_populate_single_scratch_task(adev
,
584 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH
,
585 TASK_TYPE_UCODE_SAVE
, false);
586 cz_smu_populate_single_scratch_task(adev
,
587 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM
,
588 TASK_TYPE_UCODE_SAVE
, true);
593 static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device
*adev
)
595 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
596 struct TOC
*toc
= (struct TOC
*)priv
->toc_buffer
.kaddr
;
598 toc
->JobList
[JOB_GFX_RESTORE
] = (uint8_t)priv
->toc_entry_used_count
;
601 if (adev
->firmware
.smu_load
) {
602 cz_smu_populate_single_ucode_load_task(adev
,
603 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE
, false);
604 cz_smu_populate_single_ucode_load_task(adev
,
605 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP
, false);
606 cz_smu_populate_single_ucode_load_task(adev
,
607 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME
, false);
608 cz_smu_populate_single_ucode_load_task(adev
,
609 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
, false);
610 if (adev
->asic_type
== CHIP_STONEY
) {
611 cz_smu_populate_single_ucode_load_task(adev
,
612 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
, false);
614 cz_smu_populate_single_ucode_load_task(adev
,
615 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2
, false);
617 cz_smu_populate_single_ucode_load_task(adev
,
618 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G
, false);
621 /* populate scratch */
622 cz_smu_populate_single_scratch_task(adev
,
623 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH
,
624 TASK_TYPE_UCODE_LOAD
, false);
625 cz_smu_populate_single_scratch_task(adev
,
626 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM
,
627 TASK_TYPE_UCODE_LOAD
, false);
628 cz_smu_populate_single_scratch_task(adev
,
629 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM
,
630 TASK_TYPE_UCODE_LOAD
, true);
635 static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device
*adev
)
637 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
639 priv
->toc_entry_power_profiling_index
= priv
->toc_entry_used_count
;
641 cz_smu_populate_single_scratch_task(adev
,
642 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING
,
643 TASK_TYPE_INITIALIZE
, true);
647 static int cz_smu_construct_toc_for_bootup(struct amdgpu_device
*adev
)
649 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
651 priv
->toc_entry_initialize_index
= priv
->toc_entry_used_count
;
653 if (adev
->firmware
.smu_load
) {
654 cz_smu_populate_single_ucode_load_task(adev
,
655 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0
, false);
656 if (adev
->asic_type
== CHIP_STONEY
) {
657 cz_smu_populate_single_ucode_load_task(adev
,
658 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0
, false);
660 cz_smu_populate_single_ucode_load_task(adev
,
661 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1
, false);
663 cz_smu_populate_single_ucode_load_task(adev
,
664 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE
, false);
665 cz_smu_populate_single_ucode_load_task(adev
,
666 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP
, false);
667 cz_smu_populate_single_ucode_load_task(adev
,
668 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME
, false);
669 cz_smu_populate_single_ucode_load_task(adev
,
670 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
, false);
671 if (adev
->asic_type
== CHIP_STONEY
) {
672 cz_smu_populate_single_ucode_load_task(adev
,
673 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
, false);
675 cz_smu_populate_single_ucode_load_task(adev
,
676 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2
, false);
678 cz_smu_populate_single_ucode_load_task(adev
,
679 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G
, true);
685 static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device
*adev
)
687 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
689 priv
->toc_entry_clock_table
= priv
->toc_entry_used_count
;
691 cz_smu_populate_single_scratch_task(adev
,
692 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
,
693 TASK_TYPE_INITIALIZE
, true);
698 static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device
*adev
)
701 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
702 struct TOC
*toc
= (struct TOC
*)priv
->toc_buffer
.kaddr
;
704 for (i
= 0; i
< NUM_JOBLIST_ENTRIES
; i
++)
705 toc
->JobList
[i
] = (uint8_t)IGNORE_JOB
;
711 * cz smu uninitialization
713 int cz_smu_fini(struct amdgpu_device
*adev
)
715 amdgpu_bo_unref(&adev
->smu
.toc_buf
);
716 amdgpu_bo_unref(&adev
->smu
.smu_buf
);
717 kfree(adev
->smu
.priv
);
718 adev
->smu
.priv
= NULL
;
719 if (adev
->firmware
.smu_load
)
720 amdgpu_ucode_fini_bo(adev
);
725 int cz_smu_download_pptable(struct amdgpu_device
*adev
, void **table
)
728 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
730 for (i
= 0; i
< priv
->scratch_buffer_length
; i
++)
731 if (priv
->scratch_buffer
[i
].firmware_ID
==
732 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
)
735 if (i
>= priv
->scratch_buffer_length
) {
736 dev_err(adev
->dev
, "Invalid Scratch Type\n");
740 *table
= (struct SMU8_Fusion_ClkTable
*)priv
->scratch_buffer
[i
].kaddr
;
742 /* prepare buffer for pptable */
743 cz_send_msg_to_smc_with_parameter(adev
,
744 PPSMC_MSG_SetClkTableAddrHi
,
745 priv
->scratch_buffer
[i
].mc_addr_high
);
746 cz_send_msg_to_smc_with_parameter(adev
,
747 PPSMC_MSG_SetClkTableAddrLo
,
748 priv
->scratch_buffer
[i
].mc_addr_low
);
749 cz_send_msg_to_smc_with_parameter(adev
,
750 PPSMC_MSG_ExecuteJob
,
751 priv
->toc_entry_clock_table
);
753 /* actual downloading */
754 cz_send_msg_to_smc(adev
, PPSMC_MSG_ClkTableXferToDram
);
759 int cz_smu_upload_pptable(struct amdgpu_device
*adev
)
762 struct cz_smu_private_data
*priv
= cz_smu_get_priv(adev
);
764 for (i
= 0; i
< priv
->scratch_buffer_length
; i
++)
765 if (priv
->scratch_buffer
[i
].firmware_ID
==
766 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
)
769 if (i
>= priv
->scratch_buffer_length
) {
770 dev_err(adev
->dev
, "Invalid Scratch Type\n");
775 cz_send_msg_to_smc_with_parameter(adev
,
776 PPSMC_MSG_SetClkTableAddrHi
,
777 priv
->scratch_buffer
[i
].mc_addr_high
);
778 cz_send_msg_to_smc_with_parameter(adev
,
779 PPSMC_MSG_SetClkTableAddrLo
,
780 priv
->scratch_buffer
[i
].mc_addr_low
);
781 cz_send_msg_to_smc_with_parameter(adev
,
782 PPSMC_MSG_ExecuteJob
,
783 priv
->toc_entry_clock_table
);
785 /* actual uploading */
786 cz_send_msg_to_smc(adev
, PPSMC_MSG_ClkTableXferToSmu
);
792 * cz smumgr functions initialization
794 static const struct amdgpu_smumgr_funcs cz_smumgr_funcs
= {
795 .check_fw_load_finish
= cz_smu_check_finished
,
796 .request_smu_load_fw
= NULL
,
797 .request_smu_specific_fw
= NULL
,
801 * cz smu initialization
803 int cz_smu_init(struct amdgpu_device
*adev
)
806 uint64_t mc_addr
= 0;
807 struct amdgpu_bo
**toc_buf
= &adev
->smu
.toc_buf
;
808 struct amdgpu_bo
**smu_buf
= &adev
->smu
.smu_buf
;
809 void *toc_buf_ptr
= NULL
;
810 void *smu_buf_ptr
= NULL
;
812 struct cz_smu_private_data
*priv
=
813 kzalloc(sizeof(struct cz_smu_private_data
), GFP_KERNEL
);
817 /* allocate firmware buffers */
818 if (adev
->firmware
.smu_load
)
819 amdgpu_ucode_init_bo(adev
);
821 adev
->smu
.priv
= priv
;
822 adev
->smu
.fw_flags
= 0;
823 priv
->toc_buffer
.data_size
= 4096;
825 priv
->smu_buffer
.data_size
=
826 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE
, 32) +
827 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE
, 32) +
828 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE
, 32) +
829 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData
), 32) +
830 ALIGN(sizeof(struct SMU8_Fusion_ClkTable
), 32);
832 /* prepare toc buffer and smu buffer:
833 * 1. create amdgpu_bo for toc buffer and smu buffer
835 * 3. map kernel virtual address
837 ret
= amdgpu_bo_create(adev
, priv
->toc_buffer
.data_size
, PAGE_SIZE
,
838 true, AMDGPU_GEM_DOMAIN_GTT
, 0, NULL
, NULL
,
842 dev_err(adev
->dev
, "(%d) SMC TOC buffer allocation failed\n", ret
);
846 ret
= amdgpu_bo_create(adev
, priv
->smu_buffer
.data_size
, PAGE_SIZE
,
847 true, AMDGPU_GEM_DOMAIN_GTT
, 0, NULL
, NULL
,
851 dev_err(adev
->dev
, "(%d) SMC Internal buffer allocation failed\n", ret
);
855 /* toc buffer reserve/pin/map */
856 ret
= amdgpu_bo_reserve(adev
->smu
.toc_buf
, false);
858 amdgpu_bo_unref(&adev
->smu
.toc_buf
);
859 dev_err(adev
->dev
, "(%d) SMC TOC buffer reserve failed\n", ret
);
863 ret
= amdgpu_bo_pin(adev
->smu
.toc_buf
, AMDGPU_GEM_DOMAIN_GTT
, &mc_addr
);
865 amdgpu_bo_unreserve(adev
->smu
.toc_buf
);
866 amdgpu_bo_unref(&adev
->smu
.toc_buf
);
867 dev_err(adev
->dev
, "(%d) SMC TOC buffer pin failed\n", ret
);
871 ret
= amdgpu_bo_kmap(*toc_buf
, &toc_buf_ptr
);
873 goto smu_init_failed
;
875 amdgpu_bo_unreserve(adev
->smu
.toc_buf
);
877 priv
->toc_buffer
.mc_addr_low
= lower_32_bits(mc_addr
);
878 priv
->toc_buffer
.mc_addr_high
= upper_32_bits(mc_addr
);
879 priv
->toc_buffer
.kaddr
= toc_buf_ptr
;
881 /* smu buffer reserve/pin/map */
882 ret
= amdgpu_bo_reserve(adev
->smu
.smu_buf
, false);
884 amdgpu_bo_unref(&adev
->smu
.smu_buf
);
885 dev_err(adev
->dev
, "(%d) SMC Internal buffer reserve failed\n", ret
);
889 ret
= amdgpu_bo_pin(adev
->smu
.smu_buf
, AMDGPU_GEM_DOMAIN_GTT
, &mc_addr
);
891 amdgpu_bo_unreserve(adev
->smu
.smu_buf
);
892 amdgpu_bo_unref(&adev
->smu
.smu_buf
);
893 dev_err(adev
->dev
, "(%d) SMC Internal buffer pin failed\n", ret
);
897 ret
= amdgpu_bo_kmap(*smu_buf
, &smu_buf_ptr
);
899 goto smu_init_failed
;
901 amdgpu_bo_unreserve(adev
->smu
.smu_buf
);
903 priv
->smu_buffer
.mc_addr_low
= lower_32_bits(mc_addr
);
904 priv
->smu_buffer
.mc_addr_high
= upper_32_bits(mc_addr
);
905 priv
->smu_buffer
.kaddr
= smu_buf_ptr
;
907 if (adev
->firmware
.smu_load
) {
908 if (cz_smu_populate_single_firmware_entry(adev
,
909 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0
,
910 &priv
->driver_buffer
[priv
->driver_buffer_length
++]))
911 goto smu_init_failed
;
913 if (adev
->asic_type
== CHIP_STONEY
) {
914 if (cz_smu_populate_single_firmware_entry(adev
,
915 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0
,
916 &priv
->driver_buffer
[priv
->driver_buffer_length
++]))
917 goto smu_init_failed
;
919 if (cz_smu_populate_single_firmware_entry(adev
,
920 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1
,
921 &priv
->driver_buffer
[priv
->driver_buffer_length
++]))
922 goto smu_init_failed
;
924 if (cz_smu_populate_single_firmware_entry(adev
,
925 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE
,
926 &priv
->driver_buffer
[priv
->driver_buffer_length
++]))
927 goto smu_init_failed
;
928 if (cz_smu_populate_single_firmware_entry(adev
,
929 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP
,
930 &priv
->driver_buffer
[priv
->driver_buffer_length
++]))
931 goto smu_init_failed
;
932 if (cz_smu_populate_single_firmware_entry(adev
,
933 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME
,
934 &priv
->driver_buffer
[priv
->driver_buffer_length
++]))
935 goto smu_init_failed
;
936 if (cz_smu_populate_single_firmware_entry(adev
,
937 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
,
938 &priv
->driver_buffer
[priv
->driver_buffer_length
++]))
939 goto smu_init_failed
;
940 if (adev
->asic_type
== CHIP_STONEY
) {
941 if (cz_smu_populate_single_firmware_entry(adev
,
942 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
,
943 &priv
->driver_buffer
[priv
->driver_buffer_length
++]))
944 goto smu_init_failed
;
946 if (cz_smu_populate_single_firmware_entry(adev
,
947 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2
,
948 &priv
->driver_buffer
[priv
->driver_buffer_length
++]))
949 goto smu_init_failed
;
951 if (cz_smu_populate_single_firmware_entry(adev
,
952 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G
,
953 &priv
->driver_buffer
[priv
->driver_buffer_length
++]))
954 goto smu_init_failed
;
957 if (cz_smu_populate_single_scratch_entry(adev
,
958 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH
,
959 UCODE_ID_RLC_SCRATCH_SIZE_BYTE
,
960 &priv
->scratch_buffer
[priv
->scratch_buffer_length
++]))
961 goto smu_init_failed
;
962 if (cz_smu_populate_single_scratch_entry(adev
,
963 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM
,
964 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE
,
965 &priv
->scratch_buffer
[priv
->scratch_buffer_length
++]))
966 goto smu_init_failed
;
967 if (cz_smu_populate_single_scratch_entry(adev
,
968 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM
,
969 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE
,
970 &priv
->scratch_buffer
[priv
->scratch_buffer_length
++]))
971 goto smu_init_failed
;
972 if (cz_smu_populate_single_scratch_entry(adev
,
973 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING
,
974 sizeof(struct SMU8_MultimediaPowerLogData
),
975 &priv
->scratch_buffer
[priv
->scratch_buffer_length
++]))
976 goto smu_init_failed
;
977 if (cz_smu_populate_single_scratch_entry(adev
,
978 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
,
979 sizeof(struct SMU8_Fusion_ClkTable
),
980 &priv
->scratch_buffer
[priv
->scratch_buffer_length
++]))
981 goto smu_init_failed
;
983 cz_smu_initialize_toc_empty_job_list(adev
);
984 cz_smu_construct_toc_for_rlc_aram_save(adev
);
985 cz_smu_construct_toc_for_vddgfx_enter(adev
);
986 cz_smu_construct_toc_for_vddgfx_exit(adev
);
987 cz_smu_construct_toc_for_power_profiling(adev
);
988 cz_smu_construct_toc_for_bootup(adev
);
989 cz_smu_construct_toc_for_clock_table(adev
);
990 /* init the smumgr functions */
991 adev
->smu
.smumgr_funcs
= &cz_smumgr_funcs
;
996 amdgpu_bo_unref(toc_buf
);
997 amdgpu_bo_unref(smu_buf
);