Linux 4.2.1
[linux/fpc-iii.git] / drivers / gpu / drm / amd / amdgpu / cz_smc.c
bloba72ffc7d6c26dde601bb5c28590d79a9d9827208
1 /*
2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include <linux/firmware.h>
24 #include "drmP.h"
25 #include "amdgpu.h"
26 #include "smu8.h"
27 #include "smu8_fusion.h"
28 #include "cz_ppsmc.h"
29 #include "cz_smumgr.h"
30 #include "smu_ucode_xfer_cz.h"
31 #include "amdgpu_ucode.h"
33 #include "smu/smu_8_0_d.h"
34 #include "smu/smu_8_0_sh_mask.h"
35 #include "gca/gfx_8_0_d.h"
36 #include "gca/gfx_8_0_sh_mask.h"
/* Read back the 32-bit argument/result register of the SMU message port. */
uint32_t cz_get_argument(struct amdgpu_device *adev)
{
	return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
}
43 static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
45 struct cz_smu_private_data *priv =
46 (struct cz_smu_private_data *)(adev->smu.priv);
48 return priv;
/*
 * Post a message to the SMC without waiting for the reply.
 *
 * The CONTENT field of the response register stays 0 while the SMC is
 * still processing the previous message; poll it (up to
 * adev->usec_timeout microseconds) until it becomes non-zero, then
 * clear the response register and write the new message.
 * Returns 0 on success, -EINVAL if the SMC never became ready.
 */
int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
{
	int i;
	u32 content = 0, tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
				SMU_MP1_SRBM2P_RESP_0, CONTENT);
		if (content != tmp)
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == adev->usec_timeout)
		return -EINVAL;

	WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
	WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);

	return 0;
}
/*
 * Send a message to the SMC and wait for its result.
 *
 * Posts the message via cz_send_msg_to_smc_async() and then polls the
 * response register until the SMC writes a result back.
 * Returns 0 when the SMC reports PPSMC_Result_OK; -EINVAL on send
 * failure, response timeout, or any other SMC result code.
 */
int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
{
	int i;
	u32 content = 0, tmp = 0;

	if (cz_send_msg_to_smc_async(adev, msg))
		return -EINVAL;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
				SMU_MP1_SRBM2P_RESP_0, CONTENT);
		if (content != tmp)
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == adev->usec_timeout)
		return -EINVAL;

	if (PPSMC_Result_OK != tmp) {
		dev_err(adev->dev, "SMC Failed to send Message.\n");
		return -EINVAL;
	}

	return 0;
}
/* Write the message argument register, then post @msg without waiting. */
int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
						u16 msg, u32 parameter)
{
	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
	return cz_send_msg_to_smc_async(adev, msg);
}
/* Write the message argument register, then send @msg and wait for it. */
int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						u16 msg, u32 parameter)
{
	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
	return cz_send_msg_to_smc(adev, msg);
}
116 static int cz_set_smc_sram_address(struct amdgpu_device *adev,
117 u32 smc_address, u32 limit)
119 if (smc_address & 3)
120 return -EINVAL;
121 if ((smc_address + 3) > limit)
122 return -EINVAL;
124 WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);
126 return 0;
129 int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
130 u32 *value, u32 limit)
132 int ret;
134 ret = cz_set_smc_sram_address(adev, smc_address, limit);
135 if (ret)
136 return ret;
138 *value = RREG32(mmMP0PUB_IND_DATA_0);
140 return 0;
143 int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
144 u32 value, u32 limit)
146 int ret;
148 ret = cz_set_smc_sram_address(adev, smc_address, limit);
149 if (ret)
150 return ret;
152 WREG32(mmMP0PUB_IND_DATA_0, value);
154 return 0;
/*
 * Ask the SMU firmware to load all microcode described by the TOC.
 *
 * Clears UcodeLoadStatus in the SMU firmware header, hands the TOC
 * buffer address to the SMU, and then executes the aram save, power
 * profiling and initialize jobs in that order.  Always returns 0;
 * completion is checked separately by cz_smu_check_fw_load_finish().
 */
static int cz_smu_request_load_fw(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
			offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	/* reset the load status before the SMU starts setting bits in it */
	cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);

	/* prepare toc buffers */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrHi,
				priv->toc_buffer.mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrLo,
				priv->toc_buffer.mc_addr_low);
	cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);

	/* execute jobs */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_aram);

	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_power_profiling_index);

	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_initialize_index);

	return 0;
}
/*
 * Check if the FW has been loaded; the SMU will not return if loading
 * has not finished.
 *
 * Polls UcodeLoadStatus in the SMU firmware header (via the indirect
 * MP0PUB index/data pair) until every bit of @fw_mask is set, or
 * adev->usec_timeout microseconds elapse.
 * Returns 0 on success, -EINVAL on timeout.
 */
static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
				uint32_t fw_mask)
{
	int i;
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			SMU8_FIRMWARE_HEADER_LOCATION +
			offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	WREG32(mmMP0PUB_IND_INDEX, index);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev,
		"SMU check loaded firmware failed, expecting 0x%x, getting 0x%x",
		fw_mask, RREG32(mmMP0PUB_IND_DATA));
		return -EINVAL;
	}

	return 0;
}
222 * interfaces for different ip blocks to check firmware loading status
223 * 0 for success otherwise failed
225 static int cz_smu_check_finished(struct amdgpu_device *adev,
226 enum AMDGPU_UCODE_ID id)
228 switch (id) {
229 case AMDGPU_UCODE_ID_SDMA0:
230 if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
231 return 0;
232 break;
233 case AMDGPU_UCODE_ID_SDMA1:
234 if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
235 return 0;
236 break;
237 case AMDGPU_UCODE_ID_CP_CE:
238 if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
239 return 0;
240 break;
241 case AMDGPU_UCODE_ID_CP_PFP:
242 if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
243 return 0;
244 case AMDGPU_UCODE_ID_CP_ME:
245 if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
246 return 0;
247 break;
248 case AMDGPU_UCODE_ID_CP_MEC1:
249 if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
250 return 0;
251 break;
252 case AMDGPU_UCODE_ID_CP_MEC2:
253 if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
254 return 0;
255 break;
256 case AMDGPU_UCODE_ID_RLC_G:
257 if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
258 return 0;
259 break;
260 case AMDGPU_UCODE_ID_MAXIMUM:
261 default:
262 break;
265 return 1;
/*
 * Point the CP compute instruction cache at the MEC1 ucode.
 *
 * Halts both MEC micro engines, programs the IC base control register
 * (VMID = 0, ATC = 0, CACHE_POLICY = 0, MTYPE = 1) and writes the
 * masked low/high halves of the MEC1 ucode GPU address into the IC
 * base registers.  Returns -EINVAL if MEC1 firmware was never loaded.
 */
static int cz_load_mec_firmware(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info *ucode =
			&adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	uint32_t reg_data;
	uint32_t tmp;

	if (ucode->fw == NULL)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);

	reg_data = lower_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	WREG32(mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	WREG32(mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}
/*
 * Kick off SMU-driven firmware loading and wait for completion.
 *
 * Requests the load of all ucode images, waits until the mask of
 * expected images is reported loaded, loads the MEC ucode address
 * manually on Carrizo, and finally records the loaded-firmware flags
 * consumed by cz_smu_check_finished().
 * Returns 0 on success or a negative error code.
 */
int cz_smu_start(struct amdgpu_device *adev)
{
	int ret = 0;

	uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
				UCODE_ID_SDMA0_MASK |
				UCODE_ID_SDMA1_MASK |
				UCODE_ID_CP_CE_MASK |
				UCODE_ID_CP_ME_MASK |
				UCODE_ID_CP_PFP_MASK |
				UCODE_ID_CP_MEC_JT1_MASK |
				UCODE_ID_CP_MEC_JT2_MASK;

	cz_smu_request_load_fw(adev);
	ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
	if (ret)
		return ret;

	/* manually load MEC firmware for CZ */
	if (adev->asic_type == CHIP_CARRIZO) {
		ret = cz_load_mec_firmware(adev);
		if (ret) {
			dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
			return ret;
		}
	}

	/* setup fw load flag */
	adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
			AMDGPU_SDMA1_UCODE_LOADED |
			AMDGPU_CPCE_UCODE_LOADED |
			AMDGPU_CPPFP_UCODE_LOADED |
			AMDGPU_CPME_UCODE_LOADED |
			AMDGPU_CPMEC1_UCODE_LOADED |
			AMDGPU_CPMEC2_UCODE_LOADED |
			AMDGPU_CPRLC_UCODE_LOADED;

	return ret;
}
342 static uint32_t cz_convert_fw_type(uint32_t fw_type)
344 enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
346 switch (fw_type) {
347 case UCODE_ID_SDMA0:
348 result = AMDGPU_UCODE_ID_SDMA0;
349 break;
350 case UCODE_ID_SDMA1:
351 result = AMDGPU_UCODE_ID_SDMA1;
352 break;
353 case UCODE_ID_CP_CE:
354 result = AMDGPU_UCODE_ID_CP_CE;
355 break;
356 case UCODE_ID_CP_PFP:
357 result = AMDGPU_UCODE_ID_CP_PFP;
358 break;
359 case UCODE_ID_CP_ME:
360 result = AMDGPU_UCODE_ID_CP_ME;
361 break;
362 case UCODE_ID_CP_MEC_JT1:
363 case UCODE_ID_CP_MEC_JT2:
364 result = AMDGPU_UCODE_ID_CP_MEC1;
365 break;
366 case UCODE_ID_RLC_G:
367 result = AMDGPU_UCODE_ID_RLC_G;
368 break;
369 default:
370 DRM_ERROR("UCode type is out of range!");
373 return result;
376 static uint8_t cz_smu_translate_firmware_enum_to_arg(
377 enum cz_scratch_entry firmware_enum)
379 uint8_t ret = 0;
381 switch (firmware_enum) {
382 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
383 ret = UCODE_ID_SDMA0;
384 break;
385 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
386 ret = UCODE_ID_SDMA1;
387 break;
388 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
389 ret = UCODE_ID_CP_CE;
390 break;
391 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
392 ret = UCODE_ID_CP_PFP;
393 break;
394 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
395 ret = UCODE_ID_CP_ME;
396 break;
397 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
398 ret = UCODE_ID_CP_MEC_JT1;
399 break;
400 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
401 ret = UCODE_ID_CP_MEC_JT2;
402 break;
403 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
404 ret = UCODE_ID_GMCON_RENG;
405 break;
406 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
407 ret = UCODE_ID_RLC_G;
408 break;
409 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
410 ret = UCODE_ID_RLC_SCRATCH;
411 break;
412 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
413 ret = UCODE_ID_RLC_SRM_ARAM;
414 break;
415 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
416 ret = UCODE_ID_RLC_SRM_DRAM;
417 break;
418 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
419 ret = UCODE_ID_DMCU_ERAM;
420 break;
421 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
422 ret = UCODE_ID_DMCU_IRAM;
423 break;
424 case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
425 ret = TASK_ARG_INIT_MM_PWR_LOG;
426 break;
427 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
428 case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
429 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
430 case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
431 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
432 case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
433 ret = TASK_ARG_REG_MMIO;
434 break;
435 case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
436 ret = TASK_ARG_INIT_CLK_TABLE;
437 break;
440 return ret;
/*
 * Fill @entry with the GPU address and size of one ucode image.
 *
 * For the MEC jump tables the address and size are derived from the
 * jt_offset/jt_size fields of the MEC1 firmware header (both counted
 * in dwords, hence the << 2).
 * Returns -EINVAL if the corresponding firmware was never fetched.
 */
static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
				enum cz_scratch_entry firmware_enum,
				struct cz_buffer_entry *entry)
{
	uint64_t gpu_addr;
	uint32_t data_size;
	uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header;

	if (ucode->fw == NULL)
		return -EINVAL;

	gpu_addr = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
	    (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
		data_size = le32_to_cpu(header->jt_size) << 2;
	}

	entry->mc_addr_low = lower_32_bits(gpu_addr);
	entry->mc_addr_high = upper_32_bits(gpu_addr);
	entry->data_size = data_size;
	entry->firmware_ID = firmware_enum;

	return 0;
}
475 static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
476 enum cz_scratch_entry scratch_type,
477 uint32_t size_in_byte,
478 struct cz_buffer_entry *entry)
480 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
481 uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
482 priv->smu_buffer.mc_addr_low;
483 mc_addr += size_in_byte;
485 priv->smu_buffer_used_bytes += size_in_byte;
486 entry->data_size = size_in_byte;
487 entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
488 entry->mc_addr_low = lower_32_bits(mc_addr);
489 entry->mc_addr_high = upper_32_bits(mc_addr);
490 entry->firmware_ID = scratch_type;
492 return 0;
/*
 * Append a TASK_TYPE_UCODE_LOAD task for @firmware_enum to the TOC.
 *
 * @is_last terminates the task list; otherwise the task chains to the
 * next TOC entry.  Returns -EINVAL when no driver buffer entry matches
 * the requested firmware (note: the TOC slot has already been consumed
 * by then and the partially-filled task is left in place).
 */
static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
				enum cz_scratch_entry firmware_enum,
				bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	/* toc_entry_used_count was already advanced, so it now names the
	 * index of the *next* task */
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->driver_buffer_length; i++)
		if (priv->driver_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->driver_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->driver_buffer[i].mc_addr_low;
	task->addr.high = priv->driver_buffer[i].mc_addr_high;
	task->size_bytes = priv->driver_buffer[i].data_size;

	return 0;
}
/*
 * Append a scratch-buffer task of @type (save/load/initialize) for
 * @firmware_enum to the TOC.
 *
 * @is_last terminates the task list.  For the IH register scratch area
 * the metadata command word is set so the restore happens on load.
 * Returns -EINVAL when no scratch buffer entry matches (the TOC slot
 * has already been consumed by then).
 */
static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
				enum cz_scratch_entry firmware_enum,
				uint8_t type, bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = type;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->scratch_buffer[i].mc_addr_low;
	task->addr.high = priv->scratch_buffer[i].mc_addr_high;
	task->size_bytes = priv->scratch_buffer[i].data_size;

	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) {
		struct cz_ih_meta_data *pIHReg_restore =
			(struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
/*
 * Add the RLC ARAM save job to the TOC and remember its entry index
 * so cz_smu_request_load_fw() can execute it.
 */
static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_aram = priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			TASK_TYPE_UCODE_SAVE, true);

	return 0;
}
/*
 * Build the GFX-save job list used when vddgfx is powered down:
 * save the RLC scratch and SRM DRAM areas.
 */
static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			TASK_TYPE_UCODE_SAVE, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			TASK_TYPE_UCODE_SAVE, true);

	return 0;
}
/*
 * Build the GFX-restore job list used when vddgfx comes back up:
 * reload the CP/RLC ucode (when the SMU loads firmware) and then
 * restore the RLC scratch/ARAM/DRAM areas.
 */
static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;

	/* populate ucode */
	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
	}

	/* populate scratch */
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			TASK_TYPE_UCODE_LOAD, true);

	return 0;
}
/*
 * Add the multimedia power-profiling initialize job to the TOC and
 * remember its entry index for cz_smu_request_load_fw().
 */
static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
			TASK_TYPE_INITIALIZE, true);
	return 0;
}
/*
 * Build the boot-time ucode load job list (SDMA, CP and RLC images)
 * and remember its entry index for cz_smu_request_load_fw().
 */
static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_initialize_index = priv->toc_entry_used_count;

	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
	}

	return 0;
}
/*
 * Add the fusion clock-table initialize job to the TOC and remember
 * its entry index for the pptable download/upload paths.
 */
static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_clock_table = priv->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
			TASK_TYPE_INITIALIZE, true);

	return 0;
}
/* Mark every TOC job slot unused before the real jobs are filled in. */
static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
{
	int i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
		toc->JobList[i] = (uint8_t)IGNORE_JOB;

	return 0;
}
/*
 * cz smu uninitialization: drop the TOC and SMU buffer objects, free
 * the private data allocated by cz_smu_init(), and tear down the
 * firmware BO when the SMU was doing the loading.
 */
int cz_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.smu_load)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}
/*
 * Have the SMU write the fusion clock table into the driver's scratch
 * buffer and return a pointer to it in *@table.
 * Returns -EINVAL if no clock-table scratch entry was registered.
 */
int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	*table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;

	/* prepare buffer for pptable */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual downloading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);

	return 0;
}
/*
 * Push the (possibly modified) fusion clock table from the driver's
 * scratch buffer back to the SMU.
 * Returns -EINVAL if no clock-table scratch entry was registered.
 */
int cz_smu_upload_pptable(struct amdgpu_device *adev)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	/* prepare SMU */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual uploading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);

	return 0;
}
/*
 * cz smumgr functions initialization: only the load-status check is
 * provided; the SMU-driven load requests are issued directly by
 * cz_smu_start() rather than through these hooks.
 */
static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
	.check_fw_load_finish = cz_smu_check_finished,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};
/*
 * cz smu initialization.
 *
 * Allocates the SMU private data, creates/pins/maps the TOC buffer and
 * the shared SMU scratch buffer in GTT, registers every ucode image and
 * scratch area with the loader, and finally builds the TOC job lists.
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): on the error paths below @priv is never freed (it is
 * only released by cz_smu_fini()), and the smu_init_failed path taken
 * after a kmap failure unrefs a buffer object that is still reserved —
 * verify this cleanup ordering against later kernel versions.
 */
int cz_smu_init(struct amdgpu_device *adev)
{
	int ret = -EINVAL;
	uint64_t mc_addr = 0;
	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
	void *toc_buf_ptr = NULL;
	void *smu_buf_ptr = NULL;

	struct cz_smu_private_data *priv =
			kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	/* allocate firmware buffers */
	if (adev->firmware.smu_load)
		amdgpu_ucode_init_bo(adev);

	adev->smu.priv = priv;
	adev->smu.fw_flags = 0;
	priv->toc_buffer.data_size = 4096;

	/* scratch buffer holds the RLC save areas, the power log and the
	 * fusion clock table, each 32-byte aligned */
	priv->smu_buffer.data_size =
				ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
				ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
				ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
				ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
				ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);

	/* prepare toc buffer and smu buffer:
	* 1. create amdgpu_bo for toc buffer and smu buffer
	* 2. pin mc address
	* 3. map kernel virtual address
	*/
	ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
				true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf);

	if (ret) {
		dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
				true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf);

	if (ret) {
		dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
		return ret;
	}

	/* toc buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.toc_buf);

	priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->toc_buffer.kaddr = toc_buf_ptr;

	/* smu buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.smu_buf);

	priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->smu_buffer.kaddr = smu_buf_ptr;

	/* register every ucode image the SMU is expected to load */
	if (adev->firmware.smu_load) {
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
	}

	/* carve the scratch areas out of the shared SMU buffer */
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
				sizeof(struct SMU8_MultimediaPowerLogData),
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
				sizeof(struct SMU8_Fusion_ClkTable),
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;

	/* build the TOC job lists */
	cz_smu_initialize_toc_empty_job_list(adev);
	cz_smu_construct_toc_for_rlc_aram_save(adev);
	cz_smu_construct_toc_for_vddgfx_enter(adev);
	cz_smu_construct_toc_for_vddgfx_exit(adev);
	cz_smu_construct_toc_for_power_profiling(adev);
	cz_smu_construct_toc_for_bootup(adev);
	cz_smu_construct_toc_for_clock_table(adev);
	/* init the smumgr functions */
	adev->smu.smumgr_funcs = &cz_smumgr_funcs;

	return 0;

smu_init_failed:
	amdgpu_bo_unref(toc_buf);
	amdgpu_bo_unref(smu_buf);

	return ret;
}