/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include "smu_ucode_xfer_vi.h"
26 #include "vegam_smumgr.h"
27 #include "smu/smu_7_1_3_d.h"
28 #include "smu/smu_7_1_3_sh_mask.h"
29 #include "gmc/gmc_8_1_d.h"
30 #include "gmc/gmc_8_1_sh_mask.h"
31 #include "oss/oss_3_0_d.h"
32 #include "gca/gfx_8_0_d.h"
33 #include "bif/bif_5_0_d.h"
34 #include "bif/bif_5_0_sh_mask.h"
35 #include "ppatomctrl.h"
36 #include "cgs_common.h"
37 #include "smu7_ppsmc.h"
39 #include "smu7_dyn_defaults.h"
41 #include "smu7_hwmgr.h"
42 #include "hardwaremanager.h"
43 #include "ppatomctrl.h"
45 #include "pppcielanes.h"
47 #include "dce/dce_11_2_d.h"
48 #include "dce/dce_11_2_sh_mask.h"
50 #define PPVEGAM_TARGETACTIVITY_DFLT 50
52 #define VOLTAGE_VID_OFFSET_SCALE1 625
53 #define VOLTAGE_VID_OFFSET_SCALE2 100
54 #define POWERTUNE_DEFAULT_SET_MAX 1
55 #define VDDC_VDDCI_DELTA 200
56 #define MC_CG_ARB_FREQ_F1 0x0b
58 #define STRAP_ASIC_RO_LSB 2168
59 #define STRAP_ASIC_RO_MSB 2175
61 #define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
62 #define PPSMC_MSG_EnableModeSwitchRLCNotification ((uint16_t) 0x305)
64 static const struct vegam_pt_defaults
65 vegam_power_tune_data_set_array
[POWERTUNE_DEFAULT_SET_MAX
] = {
66 /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
67 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
68 { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
69 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
70 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
73 static const sclkFcwRange_t Range_Table
[NUM_SCLK_RANGE
] = {
74 {VCO_2_4
, POSTDIV_DIV_BY_16
, 75, 160, 112},
75 {VCO_3_6
, POSTDIV_DIV_BY_16
, 112, 224, 160},
76 {VCO_2_4
, POSTDIV_DIV_BY_8
, 75, 160, 112},
77 {VCO_3_6
, POSTDIV_DIV_BY_8
, 112, 224, 160},
78 {VCO_2_4
, POSTDIV_DIV_BY_4
, 75, 160, 112},
79 {VCO_3_6
, POSTDIV_DIV_BY_4
, 112, 216, 160},
80 {VCO_2_4
, POSTDIV_DIV_BY_2
, 75, 160, 108},
81 {VCO_3_6
, POSTDIV_DIV_BY_2
, 112, 216, 160} };
83 static int vegam_smu_init(struct pp_hwmgr
*hwmgr
)
85 struct vegam_smumgr
*smu_data
;
87 smu_data
= kzalloc(sizeof(struct vegam_smumgr
), GFP_KERNEL
);
91 hwmgr
->smu_backend
= smu_data
;
93 if (smu7_init(hwmgr
)) {
101 static int vegam_start_smu_in_protection_mode(struct pp_hwmgr
*hwmgr
)
105 /* Wait for smc boot up */
106 /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
109 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
110 SMC_SYSCON_RESET_CNTL
, rst_reg
, 1);
112 result
= smu7_upload_smu_firmware_image(hwmgr
);
117 cgs_write_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
, ixSMU_STATUS
, 0);
119 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
120 SMC_SYSCON_CLOCK_CNTL_0
, ck_disable
, 0);
122 /* De-assert reset */
123 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
124 SMC_SYSCON_RESET_CNTL
, rst_reg
, 0);
127 PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr
, SMC_IND
, RCU_UC_EVENTS
, INTERRUPTS_ENABLED
, 1);
130 /* Call Test SMU message with 0x20000 offset to trigger SMU start */
131 smu7_send_msg_to_smc_offset(hwmgr
);
133 /* Wait done bit to be set */
134 /* Check pass/failed indicator */
136 PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr
, SMC_IND
, SMU_STATUS
, SMU_DONE
, 0);
138 if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
139 SMU_STATUS
, SMU_PASS
))
140 PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
142 cgs_write_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
, ixFIRMWARE_FLAGS
, 0);
144 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
145 SMC_SYSCON_RESET_CNTL
, rst_reg
, 1);
147 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
148 SMC_SYSCON_RESET_CNTL
, rst_reg
, 0);
150 /* Wait for firmware to initialize */
151 PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr
, SMC_IND
, FIRMWARE_FLAGS
, INTERRUPTS_ENABLED
, 1);
156 static int vegam_start_smu_in_non_protection_mode(struct pp_hwmgr
*hwmgr
)
160 /* wait for smc boot up */
161 PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr
, SMC_IND
, RCU_UC_EVENTS
, boot_seq_done
, 0);
163 /* Clear firmware interrupt enable flag */
164 /* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
165 cgs_write_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
,
166 ixFIRMWARE_FLAGS
, 0);
168 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
169 SMC_SYSCON_RESET_CNTL
,
172 result
= smu7_upload_smu_firmware_image(hwmgr
);
176 /* Set smc instruct start point at 0x0 */
177 smu7_program_jump_on_start(hwmgr
);
179 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
180 SMC_SYSCON_CLOCK_CNTL_0
, ck_disable
, 0);
182 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
183 SMC_SYSCON_RESET_CNTL
, rst_reg
, 0);
185 /* Wait for firmware to initialize */
187 PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr
, SMC_IND
,
188 FIRMWARE_FLAGS
, INTERRUPTS_ENABLED
, 1);
193 static int vegam_start_smu(struct pp_hwmgr
*hwmgr
)
196 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
198 /* Only start SMC if SMC RAM is not running */
199 if (!smu7_is_smc_ram_running(hwmgr
) && hwmgr
->not_vf
) {
200 smu_data
->protected_mode
= (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(hwmgr
->device
,
201 CGS_IND_REG__SMC
, SMU_FIRMWARE
, SMU_MODE
));
202 smu_data
->smu7_data
.security_hard_key
= (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(
203 hwmgr
->device
, CGS_IND_REG__SMC
, SMU_FIRMWARE
, SMU_SEL
));
205 /* Check if SMU is running in protected mode */
206 if (smu_data
->protected_mode
== 0)
207 result
= vegam_start_smu_in_non_protection_mode(hwmgr
);
209 result
= vegam_start_smu_in_protection_mode(hwmgr
);
212 PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result
);
215 /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
216 smu7_read_smc_sram_dword(hwmgr
,
217 SMU7_FIRMWARE_HEADER_LOCATION
+ offsetof(SMU75_Firmware_Header
, SoftRegisters
),
218 &(smu_data
->smu7_data
.soft_regs_start
),
221 result
= smu7_request_smu_load_fw(hwmgr
);
226 static int vegam_process_firmware_header(struct pp_hwmgr
*hwmgr
)
228 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
229 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
234 result
= smu7_read_smc_sram_dword(hwmgr
,
235 SMU7_FIRMWARE_HEADER_LOCATION
+
236 offsetof(SMU75_Firmware_Header
, DpmTable
),
240 smu_data
->smu7_data
.dpm_table_start
= tmp
;
242 error
|= (0 != result
);
244 result
= smu7_read_smc_sram_dword(hwmgr
,
245 SMU7_FIRMWARE_HEADER_LOCATION
+
246 offsetof(SMU75_Firmware_Header
, SoftRegisters
),
250 data
->soft_regs_start
= tmp
;
251 smu_data
->smu7_data
.soft_regs_start
= tmp
;
254 error
|= (0 != result
);
256 result
= smu7_read_smc_sram_dword(hwmgr
,
257 SMU7_FIRMWARE_HEADER_LOCATION
+
258 offsetof(SMU75_Firmware_Header
, mcRegisterTable
),
262 smu_data
->smu7_data
.mc_reg_table_start
= tmp
;
264 result
= smu7_read_smc_sram_dword(hwmgr
,
265 SMU7_FIRMWARE_HEADER_LOCATION
+
266 offsetof(SMU75_Firmware_Header
, FanTable
),
270 smu_data
->smu7_data
.fan_table_start
= tmp
;
272 error
|= (0 != result
);
274 result
= smu7_read_smc_sram_dword(hwmgr
,
275 SMU7_FIRMWARE_HEADER_LOCATION
+
276 offsetof(SMU75_Firmware_Header
, mcArbDramTimingTable
),
280 smu_data
->smu7_data
.arb_table_start
= tmp
;
282 error
|= (0 != result
);
284 result
= smu7_read_smc_sram_dword(hwmgr
,
285 SMU7_FIRMWARE_HEADER_LOCATION
+
286 offsetof(SMU75_Firmware_Header
, Version
),
290 hwmgr
->microcode_version_info
.SMC
= tmp
;
292 error
|= (0 != result
);
294 return error
? -1 : 0;
297 static bool vegam_is_dpm_running(struct pp_hwmgr
*hwmgr
)
299 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr
->device
,
300 CGS_IND_REG__SMC
, FEATURE_STATUS
, VOLTAGE_CONTROLLER_ON
))
304 static uint32_t vegam_get_mac_definition(uint32_t value
)
307 case SMU_MAX_LEVELS_GRAPHICS
:
308 return SMU75_MAX_LEVELS_GRAPHICS
;
309 case SMU_MAX_LEVELS_MEMORY
:
310 return SMU75_MAX_LEVELS_MEMORY
;
311 case SMU_MAX_LEVELS_LINK
:
312 return SMU75_MAX_LEVELS_LINK
;
313 case SMU_MAX_ENTRIES_SMIO
:
314 return SMU75_MAX_ENTRIES_SMIO
;
315 case SMU_MAX_LEVELS_VDDC
:
316 return SMU75_MAX_LEVELS_VDDC
;
317 case SMU_MAX_LEVELS_VDDGFX
:
318 return SMU75_MAX_LEVELS_VDDGFX
;
319 case SMU_MAX_LEVELS_VDDCI
:
320 return SMU75_MAX_LEVELS_VDDCI
;
321 case SMU_MAX_LEVELS_MVDD
:
322 return SMU75_MAX_LEVELS_MVDD
;
323 case SMU_UVD_MCLK_HANDSHAKE_DISABLE
:
324 return SMU7_UVD_MCLK_HANDSHAKE_DISABLE
|
325 SMU7_VCE_MCLK_HANDSHAKE_DISABLE
;
328 pr_warn("can't get the mac of %x\n", value
);
332 static int vegam_update_uvd_smc_table(struct pp_hwmgr
*hwmgr
)
334 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
335 uint32_t mm_boot_level_offset
, mm_boot_level_value
;
336 struct phm_ppt_v1_information
*table_info
=
337 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
339 smu_data
->smc_state_table
.UvdBootLevel
= 0;
340 if (table_info
->mm_dep_table
->count
> 0)
341 smu_data
->smc_state_table
.UvdBootLevel
=
342 (uint8_t) (table_info
->mm_dep_table
->count
- 1);
343 mm_boot_level_offset
= smu_data
->smu7_data
.dpm_table_start
+ offsetof(SMU75_Discrete_DpmTable
,
345 mm_boot_level_offset
/= 4;
346 mm_boot_level_offset
*= 4;
347 mm_boot_level_value
= cgs_read_ind_register(hwmgr
->device
,
348 CGS_IND_REG__SMC
, mm_boot_level_offset
);
349 mm_boot_level_value
&= 0x00FFFFFF;
350 mm_boot_level_value
|= smu_data
->smc_state_table
.UvdBootLevel
<< 24;
351 cgs_write_ind_register(hwmgr
->device
,
352 CGS_IND_REG__SMC
, mm_boot_level_offset
, mm_boot_level_value
);
354 if (!phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
355 PHM_PlatformCaps_UVDDPM
) ||
356 phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
357 PHM_PlatformCaps_StablePState
))
358 smum_send_msg_to_smc_with_parameter(hwmgr
,
359 PPSMC_MSG_UVDDPM_SetEnabledMask
,
360 (uint32_t)(1 << smu_data
->smc_state_table
.UvdBootLevel
));
364 static int vegam_update_vce_smc_table(struct pp_hwmgr
*hwmgr
)
366 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
367 uint32_t mm_boot_level_offset
, mm_boot_level_value
;
368 struct phm_ppt_v1_information
*table_info
=
369 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
371 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
372 PHM_PlatformCaps_StablePState
))
373 smu_data
->smc_state_table
.VceBootLevel
=
374 (uint8_t) (table_info
->mm_dep_table
->count
- 1);
376 smu_data
->smc_state_table
.VceBootLevel
= 0;
378 mm_boot_level_offset
= smu_data
->smu7_data
.dpm_table_start
+
379 offsetof(SMU75_Discrete_DpmTable
, VceBootLevel
);
380 mm_boot_level_offset
/= 4;
381 mm_boot_level_offset
*= 4;
382 mm_boot_level_value
= cgs_read_ind_register(hwmgr
->device
,
383 CGS_IND_REG__SMC
, mm_boot_level_offset
);
384 mm_boot_level_value
&= 0xFF00FFFF;
385 mm_boot_level_value
|= smu_data
->smc_state_table
.VceBootLevel
<< 16;
386 cgs_write_ind_register(hwmgr
->device
,
387 CGS_IND_REG__SMC
, mm_boot_level_offset
, mm_boot_level_value
);
389 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_StablePState
))
390 smum_send_msg_to_smc_with_parameter(hwmgr
,
391 PPSMC_MSG_VCEDPM_SetEnabledMask
,
392 (uint32_t)1 << smu_data
->smc_state_table
.VceBootLevel
);
396 static int vegam_update_bif_smc_table(struct pp_hwmgr
*hwmgr
)
398 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
399 struct phm_ppt_v1_information
*table_info
=
400 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
401 struct phm_ppt_v1_pcie_table
*pcie_table
= table_info
->pcie_table
;
404 max_entry
= (SMU75_MAX_LEVELS_LINK
< pcie_table
->count
) ?
405 SMU75_MAX_LEVELS_LINK
:
407 /* Setup BIF_SCLK levels */
408 for (i
= 0; i
< max_entry
; i
++)
409 smu_data
->bif_sclk_table
[i
] = pcie_table
->entries
[i
].pcie_sclk
;
413 static int vegam_update_smc_table(struct pp_hwmgr
*hwmgr
, uint32_t type
)
417 vegam_update_uvd_smc_table(hwmgr
);
420 vegam_update_vce_smc_table(hwmgr
);
423 vegam_update_bif_smc_table(hwmgr
);
431 static void vegam_initialize_power_tune_defaults(struct pp_hwmgr
*hwmgr
)
433 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
434 struct phm_ppt_v1_information
*table_info
=
435 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
438 table_info
->cac_dtp_table
->usPowerTuneDataSetID
<= POWERTUNE_DEFAULT_SET_MAX
&&
439 table_info
->cac_dtp_table
->usPowerTuneDataSetID
)
440 smu_data
->power_tune_defaults
=
441 &vegam_power_tune_data_set_array
442 [table_info
->cac_dtp_table
->usPowerTuneDataSetID
- 1];
444 smu_data
->power_tune_defaults
= &vegam_power_tune_data_set_array
[0];
448 static int vegam_populate_smc_mvdd_table(struct pp_hwmgr
*hwmgr
,
449 SMU75_Discrete_DpmTable
*table
)
451 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
452 uint32_t count
, level
;
454 if (SMU7_VOLTAGE_CONTROL_BY_GPIO
== data
->mvdd_control
) {
455 count
= data
->mvdd_voltage_table
.count
;
456 if (count
> SMU_MAX_SMIO_LEVELS
)
457 count
= SMU_MAX_SMIO_LEVELS
;
458 for (level
= 0; level
< count
; level
++) {
459 table
->SmioTable2
.Pattern
[level
].Voltage
= PP_HOST_TO_SMC_US(
460 data
->mvdd_voltage_table
.entries
[count
].value
* VOLTAGE_SCALE
);
461 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
462 table
->SmioTable2
.Pattern
[level
].Smio
=
464 table
->Smio
[level
] |=
465 data
->mvdd_voltage_table
.entries
[level
].smio_low
;
467 table
->SmioMask2
= data
->mvdd_voltage_table
.mask_low
;
469 table
->MvddLevelCount
= (uint32_t) PP_HOST_TO_SMC_UL(count
);
475 static int vegam_populate_smc_vddci_table(struct pp_hwmgr
*hwmgr
,
476 struct SMU75_Discrete_DpmTable
*table
)
478 uint32_t count
, level
;
479 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
481 count
= data
->vddci_voltage_table
.count
;
483 if (SMU7_VOLTAGE_CONTROL_BY_GPIO
== data
->vddci_control
) {
484 if (count
> SMU_MAX_SMIO_LEVELS
)
485 count
= SMU_MAX_SMIO_LEVELS
;
486 for (level
= 0; level
< count
; ++level
) {
487 table
->SmioTable1
.Pattern
[level
].Voltage
= PP_HOST_TO_SMC_US(
488 data
->vddci_voltage_table
.entries
[level
].value
* VOLTAGE_SCALE
);
489 table
->SmioTable1
.Pattern
[level
].Smio
= (uint8_t) level
;
491 table
->Smio
[level
] |= data
->vddci_voltage_table
.entries
[level
].smio_low
;
495 table
->SmioMask1
= data
->vddci_voltage_table
.mask_low
;
500 static int vegam_populate_cac_table(struct pp_hwmgr
*hwmgr
,
501 struct SMU75_Discrete_DpmTable
*table
)
505 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
506 struct phm_ppt_v1_information
*table_info
=
507 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
508 struct phm_ppt_v1_voltage_lookup_table
*lookup_table
=
509 table_info
->vddc_lookup_table
;
510 /* tables is already swapped, so in order to use the value from it,
511 * we need to swap it back.
512 * We are populating vddc CAC data to BapmVddc table
513 * in split and merged mode
515 for (count
= 0; count
< lookup_table
->count
; count
++) {
516 index
= phm_get_voltage_index(lookup_table
,
517 data
->vddc_voltage_table
.entries
[count
].value
);
518 table
->BapmVddcVidLoSidd
[count
] =
519 convert_to_vid(lookup_table
->entries
[index
].us_cac_low
);
520 table
->BapmVddcVidHiSidd
[count
] =
521 convert_to_vid(lookup_table
->entries
[index
].us_cac_mid
);
522 table
->BapmVddcVidHiSidd2
[count
] =
523 convert_to_vid(lookup_table
->entries
[index
].us_cac_high
);
/*
 * vegam_populate_smc_voltage_tables - populate all SMC voltage tables.
 *
 * Runs the VDDCI, MVDD and CAC population helpers in sequence. Their
 * returns are intentionally not checked (they always return 0 today).
 */
static int vegam_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
		struct SMU75_Discrete_DpmTable *table)
{
	vegam_populate_smc_vddci_table(hwmgr, table);
	vegam_populate_smc_mvdd_table(hwmgr, table);
	vegam_populate_cac_table(hwmgr, table);

	return 0;
}
539 static int vegam_populate_ulv_level(struct pp_hwmgr
*hwmgr
,
540 struct SMU75_Discrete_Ulv
*state
)
542 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
543 struct phm_ppt_v1_information
*table_info
=
544 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
546 state
->CcPwrDynRm
= 0;
547 state
->CcPwrDynRm1
= 0;
549 state
->VddcOffset
= (uint16_t) table_info
->us_ulv_voltage_offset
;
550 state
->VddcOffsetVid
= (uint8_t)(table_info
->us_ulv_voltage_offset
*
551 VOLTAGE_VID_OFFSET_SCALE2
/ VOLTAGE_VID_OFFSET_SCALE1
);
553 state
->VddcPhase
= data
->vddc_phase_shed_control
^ 0x3;
555 CONVERT_FROM_HOST_TO_SMC_UL(state
->CcPwrDynRm
);
556 CONVERT_FROM_HOST_TO_SMC_UL(state
->CcPwrDynRm1
);
557 CONVERT_FROM_HOST_TO_SMC_US(state
->VddcOffset
);
562 static int vegam_populate_ulv_state(struct pp_hwmgr
*hwmgr
,
563 struct SMU75_Discrete_DpmTable
*table
)
565 return vegam_populate_ulv_level(hwmgr
, &table
->Ulv
);
568 static int vegam_populate_smc_link_level(struct pp_hwmgr
*hwmgr
,
569 struct SMU75_Discrete_DpmTable
*table
)
571 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
572 struct vegam_smumgr
*smu_data
=
573 (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
574 struct smu7_dpm_table
*dpm_table
= &data
->dpm_table
;
577 /* Index (dpm_table->pcie_speed_table.count)
578 * is reserved for PCIE boot level. */
579 for (i
= 0; i
<= dpm_table
->pcie_speed_table
.count
; i
++) {
580 table
->LinkLevel
[i
].PcieGenSpeed
=
581 (uint8_t)dpm_table
->pcie_speed_table
.dpm_levels
[i
].value
;
582 table
->LinkLevel
[i
].PcieLaneCount
= (uint8_t)encode_pcie_lane_width(
583 dpm_table
->pcie_speed_table
.dpm_levels
[i
].param1
);
584 table
->LinkLevel
[i
].EnabledForActivity
= 1;
585 table
->LinkLevel
[i
].SPC
= (uint8_t)(data
->pcie_spc_cap
& 0xff);
586 table
->LinkLevel
[i
].DownThreshold
= PP_HOST_TO_SMC_UL(5);
587 table
->LinkLevel
[i
].UpThreshold
= PP_HOST_TO_SMC_UL(30);
590 smu_data
->smc_state_table
.LinkLevelCount
=
591 (uint8_t)dpm_table
->pcie_speed_table
.count
;
593 /* To Do move to hwmgr */
594 data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
=
595 phm_get_dpm_level_enable_mask_value(&dpm_table
->pcie_speed_table
);
600 static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr
*hwmgr
,
601 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
,
602 uint32_t clock
, SMU_VoltageLevel
*voltage
, uint32_t *mvdd
)
606 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
608 *voltage
= *mvdd
= 0;
610 /* clock - voltage dependency table is empty table */
611 if (dep_table
->count
== 0)
614 for (i
= 0; i
< dep_table
->count
; i
++) {
615 /* find first sclk bigger than request */
616 if (dep_table
->entries
[i
].clk
>= clock
) {
617 *voltage
|= (dep_table
->entries
[i
].vddc
*
618 VOLTAGE_SCALE
) << VDDC_SHIFT
;
619 if (SMU7_VOLTAGE_CONTROL_NONE
== data
->vddci_control
)
620 *voltage
|= (data
->vbios_boot_state
.vddci_bootup_value
*
621 VOLTAGE_SCALE
) << VDDCI_SHIFT
;
622 else if (dep_table
->entries
[i
].vddci
)
623 *voltage
|= (dep_table
->entries
[i
].vddci
*
624 VOLTAGE_SCALE
) << VDDCI_SHIFT
;
626 vddci
= phm_find_closest_vddci(&(data
->vddci_voltage_table
),
627 (dep_table
->entries
[i
].vddc
-
628 (uint16_t)VDDC_VDDCI_DELTA
));
629 *voltage
|= (vddci
* VOLTAGE_SCALE
) << VDDCI_SHIFT
;
632 if (SMU7_VOLTAGE_CONTROL_NONE
== data
->mvdd_control
)
633 *mvdd
= data
->vbios_boot_state
.mvdd_bootup_value
*
635 else if (dep_table
->entries
[i
].mvdd
)
636 *mvdd
= (uint32_t) dep_table
->entries
[i
].mvdd
*
639 *voltage
|= 1 << PHASES_SHIFT
;
644 /* sclk is bigger than max sclk in the dependence table */
645 *voltage
|= (dep_table
->entries
[i
- 1].vddc
* VOLTAGE_SCALE
) << VDDC_SHIFT
;
646 vddci
= phm_find_closest_vddci(&(data
->vddci_voltage_table
),
647 (dep_table
->entries
[i
- 1].vddc
-
648 (uint16_t)VDDC_VDDCI_DELTA
));
650 if (SMU7_VOLTAGE_CONTROL_NONE
== data
->vddci_control
)
651 *voltage
|= (data
->vbios_boot_state
.vddci_bootup_value
*
652 VOLTAGE_SCALE
) << VDDCI_SHIFT
;
653 else if (dep_table
->entries
[i
- 1].vddci
)
654 *voltage
|= (dep_table
->entries
[i
- 1].vddci
*
655 VOLTAGE_SCALE
) << VDDC_SHIFT
;
657 *voltage
|= (vddci
* VOLTAGE_SCALE
) << VDDCI_SHIFT
;
659 if (SMU7_VOLTAGE_CONTROL_NONE
== data
->mvdd_control
)
660 *mvdd
= data
->vbios_boot_state
.mvdd_bootup_value
* VOLTAGE_SCALE
;
661 else if (dep_table
->entries
[i
].mvdd
)
662 *mvdd
= (uint32_t) dep_table
->entries
[i
- 1].mvdd
* VOLTAGE_SCALE
;
667 static void vegam_get_sclk_range_table(struct pp_hwmgr
*hwmgr
,
668 SMU75_Discrete_DpmTable
*table
)
670 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
673 struct pp_atom_ctrl_sclk_range_table range_table_from_vbios
= { { {0} } };
675 ref_clk
= amdgpu_asic_get_xclk((struct amdgpu_device
*)hwmgr
->adev
);
677 if (0 == atomctrl_get_smc_sclk_range_table(hwmgr
, &range_table_from_vbios
)) {
678 for (i
= 0; i
< NUM_SCLK_RANGE
; i
++) {
679 table
->SclkFcwRangeTable
[i
].vco_setting
=
680 range_table_from_vbios
.entry
[i
].ucVco_setting
;
681 table
->SclkFcwRangeTable
[i
].postdiv
=
682 range_table_from_vbios
.entry
[i
].ucPostdiv
;
683 table
->SclkFcwRangeTable
[i
].fcw_pcc
=
684 range_table_from_vbios
.entry
[i
].usFcw_pcc
;
686 table
->SclkFcwRangeTable
[i
].fcw_trans_upper
=
687 range_table_from_vbios
.entry
[i
].usFcw_trans_upper
;
688 table
->SclkFcwRangeTable
[i
].fcw_trans_lower
=
689 range_table_from_vbios
.entry
[i
].usRcw_trans_lower
;
691 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_pcc
);
692 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_trans_upper
);
693 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_trans_lower
);
698 for (i
= 0; i
< NUM_SCLK_RANGE
; i
++) {
699 smu_data
->range_table
[i
].trans_lower_frequency
=
700 (ref_clk
* Range_Table
[i
].fcw_trans_lower
) >> Range_Table
[i
].postdiv
;
701 smu_data
->range_table
[i
].trans_upper_frequency
=
702 (ref_clk
* Range_Table
[i
].fcw_trans_upper
) >> Range_Table
[i
].postdiv
;
704 table
->SclkFcwRangeTable
[i
].vco_setting
= Range_Table
[i
].vco_setting
;
705 table
->SclkFcwRangeTable
[i
].postdiv
= Range_Table
[i
].postdiv
;
706 table
->SclkFcwRangeTable
[i
].fcw_pcc
= Range_Table
[i
].fcw_pcc
;
708 table
->SclkFcwRangeTable
[i
].fcw_trans_upper
= Range_Table
[i
].fcw_trans_upper
;
709 table
->SclkFcwRangeTable
[i
].fcw_trans_lower
= Range_Table
[i
].fcw_trans_lower
;
711 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_pcc
);
712 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_trans_upper
);
713 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_trans_lower
);
717 static int vegam_calculate_sclk_params(struct pp_hwmgr
*hwmgr
,
718 uint32_t clock
, SMU_SclkSetting
*sclk_setting
)
720 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
721 const SMU75_Discrete_DpmTable
*table
= &(smu_data
->smc_state_table
);
722 struct pp_atomctrl_clock_dividers_ai dividers
;
724 uint32_t pcc_target_percent
, pcc_target_freq
, ss_target_percent
, ss_target_freq
;
729 sclk_setting
->SclkFrequency
= clock
;
730 /* get the engine clock dividers for this clock value */
731 result
= atomctrl_get_engine_pll_dividers_ai(hwmgr
, clock
, ÷rs
);
733 sclk_setting
->Fcw_int
= dividers
.usSclk_fcw_int
;
734 sclk_setting
->Fcw_frac
= dividers
.usSclk_fcw_frac
;
735 sclk_setting
->Pcc_fcw_int
= dividers
.usPcc_fcw_int
;
736 sclk_setting
->PllRange
= dividers
.ucSclkPllRange
;
737 sclk_setting
->Sclk_slew_rate
= 0x400;
738 sclk_setting
->Pcc_up_slew_rate
= dividers
.usPcc_fcw_slew_frac
;
739 sclk_setting
->Pcc_down_slew_rate
= 0xffff;
740 sclk_setting
->SSc_En
= dividers
.ucSscEnable
;
741 sclk_setting
->Fcw1_int
= dividers
.usSsc_fcw1_int
;
742 sclk_setting
->Fcw1_frac
= dividers
.usSsc_fcw1_frac
;
743 sclk_setting
->Sclk_ss_slew_rate
= dividers
.usSsc_fcw_slew_frac
;
747 ref_clock
= amdgpu_asic_get_xclk((struct amdgpu_device
*)hwmgr
->adev
);
749 for (i
= 0; i
< NUM_SCLK_RANGE
; i
++) {
750 if (clock
> smu_data
->range_table
[i
].trans_lower_frequency
751 && clock
<= smu_data
->range_table
[i
].trans_upper_frequency
) {
752 sclk_setting
->PllRange
= i
;
757 sclk_setting
->Fcw_int
= (uint16_t)
758 ((clock
<< table
->SclkFcwRangeTable
[sclk_setting
->PllRange
].postdiv
) /
760 temp
= clock
<< table
->SclkFcwRangeTable
[sclk_setting
->PllRange
].postdiv
;
762 do_div(temp
, ref_clock
);
763 sclk_setting
->Fcw_frac
= temp
& 0xffff;
765 pcc_target_percent
= 10; /* Hardcode 10% for now. */
766 pcc_target_freq
= clock
- (clock
* pcc_target_percent
/ 100);
767 sclk_setting
->Pcc_fcw_int
= (uint16_t)
768 ((pcc_target_freq
<< table
->SclkFcwRangeTable
[sclk_setting
->PllRange
].postdiv
) /
771 ss_target_percent
= 2; /* Hardcode 2% for now. */
772 sclk_setting
->SSc_En
= 0;
773 if (ss_target_percent
) {
774 sclk_setting
->SSc_En
= 1;
775 ss_target_freq
= clock
- (clock
* ss_target_percent
/ 100);
776 sclk_setting
->Fcw1_int
= (uint16_t)
777 ((ss_target_freq
<< table
->SclkFcwRangeTable
[sclk_setting
->PllRange
].postdiv
) /
779 temp
= ss_target_freq
<< table
->SclkFcwRangeTable
[sclk_setting
->PllRange
].postdiv
;
781 do_div(temp
, ref_clock
);
782 sclk_setting
->Fcw1_frac
= temp
& 0xffff;
788 static uint8_t vegam_get_sleep_divider_id_from_clock(uint32_t clock
,
793 uint32_t min
= max(clock_insr
, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK
);
795 PP_ASSERT_WITH_CODE((clock
>= min
),
796 "Engine clock can't satisfy stutter requirement!",
798 for (i
= 31; ; i
--) {
799 temp
= clock
/ (i
+ 1);
801 if (temp
>= min
|| i
== 0)
807 static int vegam_populate_single_graphic_level(struct pp_hwmgr
*hwmgr
,
808 uint32_t clock
, struct SMU75_Discrete_GraphicsLevel
*level
)
811 /* PP_Clocks minClocks; */
813 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
814 struct phm_ppt_v1_information
*table_info
=
815 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
816 SMU_SclkSetting curr_sclk_setting
= { 0 };
818 result
= vegam_calculate_sclk_params(hwmgr
, clock
, &curr_sclk_setting
);
820 /* populate graphics levels */
821 result
= vegam_get_dependency_volt_by_clk(hwmgr
,
822 table_info
->vdd_dep_on_sclk
, clock
,
823 &level
->MinVoltage
, &mvdd
);
825 PP_ASSERT_WITH_CODE((0 == result
),
826 "can not find VDDC voltage value for "
827 "VDDC engine clock dependency table",
829 level
->ActivityLevel
= (uint16_t)(SclkDPMTuning_VEGAM
>> DPMTuning_Activity_Shift
);
831 level
->CcPwrDynRm
= 0;
832 level
->CcPwrDynRm1
= 0;
833 level
->EnabledForActivity
= 0;
834 level
->EnabledForThrottle
= 1;
835 level
->VoltageDownHyst
= 0;
836 level
->PowerThrottle
= 0;
837 data
->display_timing
.min_clock_in_sr
= hwmgr
->display_config
->min_core_set_clock_in_sr
;
839 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_SclkDeepSleep
))
840 level
->DeepSleepDivId
= vegam_get_sleep_divider_id_from_clock(clock
,
841 hwmgr
->display_config
->min_core_set_clock_in_sr
);
843 level
->SclkSetting
= curr_sclk_setting
;
845 CONVERT_FROM_HOST_TO_SMC_UL(level
->MinVoltage
);
846 CONVERT_FROM_HOST_TO_SMC_UL(level
->CcPwrDynRm
);
847 CONVERT_FROM_HOST_TO_SMC_UL(level
->CcPwrDynRm1
);
848 CONVERT_FROM_HOST_TO_SMC_US(level
->ActivityLevel
);
849 CONVERT_FROM_HOST_TO_SMC_UL(level
->SclkSetting
.SclkFrequency
);
850 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Fcw_int
);
851 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Fcw_frac
);
852 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Pcc_fcw_int
);
853 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Sclk_slew_rate
);
854 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Pcc_up_slew_rate
);
855 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Pcc_down_slew_rate
);
856 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Fcw1_int
);
857 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Fcw1_frac
);
858 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Sclk_ss_slew_rate
);
862 static int vegam_populate_all_graphic_levels(struct pp_hwmgr
*hwmgr
)
864 struct smu7_hwmgr
*hw_data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
865 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
866 struct smu7_dpm_table
*dpm_table
= &hw_data
->dpm_table
;
867 struct phm_ppt_v1_information
*table_info
=
868 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
869 struct phm_ppt_v1_pcie_table
*pcie_table
= table_info
->pcie_table
;
870 uint8_t pcie_entry_cnt
= (uint8_t) hw_data
->dpm_table
.pcie_speed_table
.count
;
872 uint32_t array
= smu_data
->smu7_data
.dpm_table_start
+
873 offsetof(SMU75_Discrete_DpmTable
, GraphicsLevel
);
874 uint32_t array_size
= sizeof(struct SMU75_Discrete_GraphicsLevel
) *
875 SMU75_MAX_LEVELS_GRAPHICS
;
876 struct SMU75_Discrete_GraphicsLevel
*levels
=
877 smu_data
->smc_state_table
.GraphicsLevel
;
878 uint32_t i
, max_entry
;
879 uint8_t hightest_pcie_level_enabled
= 0,
880 lowest_pcie_level_enabled
= 0,
881 mid_pcie_level_enabled
= 0,
884 vegam_get_sclk_range_table(hwmgr
, &(smu_data
->smc_state_table
));
886 for (i
= 0; i
< dpm_table
->sclk_table
.count
; i
++) {
888 result
= vegam_populate_single_graphic_level(hwmgr
,
889 dpm_table
->sclk_table
.dpm_levels
[i
].value
,
890 &(smu_data
->smc_state_table
.GraphicsLevel
[i
]));
894 levels
[i
].UpHyst
= (uint8_t)
895 (SclkDPMTuning_VEGAM
>> DPMTuning_Uphyst_Shift
);
896 levels
[i
].DownHyst
= (uint8_t)
897 (SclkDPMTuning_VEGAM
>> DPMTuning_Downhyst_Shift
);
898 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
900 levels
[i
].DeepSleepDivId
= 0;
902 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
903 PHM_PlatformCaps_SPLLShutdownSupport
))
904 smu_data
->smc_state_table
.GraphicsLevel
[0].SclkSetting
.SSc_En
= 0;
906 smu_data
->smc_state_table
.GraphicsDpmLevelCount
=
907 (uint8_t)dpm_table
->sclk_table
.count
;
908 hw_data
->dpm_level_enable_mask
.sclk_dpm_enable_mask
=
909 phm_get_dpm_level_enable_mask_value(&dpm_table
->sclk_table
);
911 for (i
= 0; i
< dpm_table
->sclk_table
.count
; i
++)
912 levels
[i
].EnabledForActivity
=
913 (hw_data
->dpm_level_enable_mask
.sclk_dpm_enable_mask
>> i
) & 0x1;
915 if (pcie_table
!= NULL
) {
916 PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt
),
917 "There must be 1 or more PCIE levels defined in PPTable.",
919 max_entry
= pcie_entry_cnt
- 1;
920 for (i
= 0; i
< dpm_table
->sclk_table
.count
; i
++)
921 levels
[i
].pcieDpmLevel
=
922 (uint8_t) ((i
< max_entry
) ? i
: max_entry
);
924 while (hw_data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
&&
925 ((hw_data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
&
926 (1 << (hightest_pcie_level_enabled
+ 1))) != 0))
927 hightest_pcie_level_enabled
++;
929 while (hw_data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
&&
930 ((hw_data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
&
931 (1 << lowest_pcie_level_enabled
)) == 0))
932 lowest_pcie_level_enabled
++;
934 while ((count
< hightest_pcie_level_enabled
) &&
935 ((hw_data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
&
936 (1 << (lowest_pcie_level_enabled
+ 1 + count
))) == 0))
939 mid_pcie_level_enabled
= (lowest_pcie_level_enabled
+ 1 + count
) <
940 hightest_pcie_level_enabled
?
941 (lowest_pcie_level_enabled
+ 1 + count
) :
942 hightest_pcie_level_enabled
;
944 /* set pcieDpmLevel to hightest_pcie_level_enabled */
945 for (i
= 2; i
< dpm_table
->sclk_table
.count
; i
++)
946 levels
[i
].pcieDpmLevel
= hightest_pcie_level_enabled
;
948 /* set pcieDpmLevel to lowest_pcie_level_enabled */
949 levels
[0].pcieDpmLevel
= lowest_pcie_level_enabled
;
951 /* set pcieDpmLevel to mid_pcie_level_enabled */
952 levels
[1].pcieDpmLevel
= mid_pcie_level_enabled
;
954 /* level count will send to smc once at init smc table and never change */
955 result
= smu7_copy_bytes_to_smc(hwmgr
, array
, (uint8_t *)levels
,
956 (uint32_t)array_size
, SMC_RAM_END
);
961 static int vegam_calculate_mclk_params(struct pp_hwmgr
*hwmgr
,
962 uint32_t clock
, struct SMU75_Discrete_MemoryLevel
*mem_level
)
964 struct pp_atomctrl_memory_clock_param_ai mpll_param
;
966 PP_ASSERT_WITH_CODE(!atomctrl_get_memory_pll_dividers_ai(hwmgr
,
968 "Failed to retrieve memory pll parameter.",
971 mem_level
->MclkFrequency
= (uint32_t)mpll_param
.ulClock
;
972 mem_level
->Fcw_int
= (uint16_t)mpll_param
.ulMclk_fcw_int
;
973 mem_level
->Fcw_frac
= (uint16_t)mpll_param
.ulMclk_fcw_frac
;
974 mem_level
->Postdiv
= (uint8_t)mpll_param
.ulPostDiv
;
979 static int vegam_populate_single_memory_level(struct pp_hwmgr
*hwmgr
,
980 uint32_t clock
, struct SMU75_Discrete_MemoryLevel
*mem_level
)
982 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
983 struct phm_ppt_v1_information
*table_info
=
984 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
986 uint32_t mclk_stutter_mode_threshold
= 60000;
989 if (table_info
->vdd_dep_on_mclk
) {
990 result
= vegam_get_dependency_volt_by_clk(hwmgr
,
991 table_info
->vdd_dep_on_mclk
, clock
,
992 &mem_level
->MinVoltage
, &mem_level
->MinMvdd
);
993 PP_ASSERT_WITH_CODE(!result
,
994 "can not find MinVddc voltage value from memory "
995 "VDDC voltage dependency table", return result
);
998 result
= vegam_calculate_mclk_params(hwmgr
, clock
, mem_level
);
999 PP_ASSERT_WITH_CODE(!result
,
1000 "Failed to calculate mclk params.",
1003 mem_level
->EnabledForThrottle
= 1;
1004 mem_level
->EnabledForActivity
= 0;
1005 mem_level
->VoltageDownHyst
= 0;
1006 mem_level
->ActivityLevel
= (uint16_t)
1007 (MemoryDPMTuning_VEGAM
>> DPMTuning_Activity_Shift
);
1008 mem_level
->StutterEnable
= false;
1009 mem_level
->DisplayWatermark
= PPSMC_DISPLAY_WATERMARK_LOW
;
1011 data
->display_timing
.num_existing_displays
= hwmgr
->display_config
->num_display
;
1013 if (mclk_stutter_mode_threshold
&&
1014 (clock
<= mclk_stutter_mode_threshold
) &&
1015 (PHM_READ_FIELD(hwmgr
->device
, DPG_PIPE_STUTTER_CONTROL
,
1016 STUTTER_ENABLE
) & 0x1))
1017 mem_level
->StutterEnable
= true;
1020 CONVERT_FROM_HOST_TO_SMC_UL(mem_level
->MinMvdd
);
1021 CONVERT_FROM_HOST_TO_SMC_UL(mem_level
->MclkFrequency
);
1022 CONVERT_FROM_HOST_TO_SMC_US(mem_level
->Fcw_int
);
1023 CONVERT_FROM_HOST_TO_SMC_US(mem_level
->Fcw_frac
);
1024 CONVERT_FROM_HOST_TO_SMC_US(mem_level
->ActivityLevel
);
1025 CONVERT_FROM_HOST_TO_SMC_UL(mem_level
->MinVoltage
);
1031 static int vegam_populate_all_memory_levels(struct pp_hwmgr
*hwmgr
)
1033 struct smu7_hwmgr
*hw_data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1034 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1035 struct smu7_dpm_table
*dpm_table
= &hw_data
->dpm_table
;
1037 /* populate MCLK dpm table to SMU7 */
1038 uint32_t array
= smu_data
->smu7_data
.dpm_table_start
+
1039 offsetof(SMU75_Discrete_DpmTable
, MemoryLevel
);
1040 uint32_t array_size
= sizeof(SMU75_Discrete_MemoryLevel
) *
1041 SMU75_MAX_LEVELS_MEMORY
;
1042 struct SMU75_Discrete_MemoryLevel
*levels
=
1043 smu_data
->smc_state_table
.MemoryLevel
;
1046 for (i
= 0; i
< dpm_table
->mclk_table
.count
; i
++) {
1047 PP_ASSERT_WITH_CODE((0 != dpm_table
->mclk_table
.dpm_levels
[i
].value
),
1048 "can not populate memory level as memory clock is zero",
1050 result
= vegam_populate_single_memory_level(hwmgr
,
1051 dpm_table
->mclk_table
.dpm_levels
[i
].value
,
1057 levels
[i
].UpHyst
= (uint8_t)
1058 (MemoryDPMTuning_VEGAM
>> DPMTuning_Uphyst_Shift
);
1059 levels
[i
].DownHyst
= (uint8_t)
1060 (MemoryDPMTuning_VEGAM
>> DPMTuning_Downhyst_Shift
);
1063 smu_data
->smc_state_table
.MemoryDpmLevelCount
=
1064 (uint8_t)dpm_table
->mclk_table
.count
;
1065 hw_data
->dpm_level_enable_mask
.mclk_dpm_enable_mask
=
1066 phm_get_dpm_level_enable_mask_value(&dpm_table
->mclk_table
);
1068 for (i
= 0; i
< dpm_table
->mclk_table
.count
; i
++)
1069 levels
[i
].EnabledForActivity
=
1070 (hw_data
->dpm_level_enable_mask
.mclk_dpm_enable_mask
>> i
) & 0x1;
1072 levels
[dpm_table
->mclk_table
.count
- 1].DisplayWatermark
=
1073 PPSMC_DISPLAY_WATERMARK_HIGH
;
1075 /* level count will send to smc once at init smc table and never change */
1076 result
= smu7_copy_bytes_to_smc(hwmgr
, array
, (uint8_t *)levels
,
1077 (uint32_t)array_size
, SMC_RAM_END
);
1082 static int vegam_populate_mvdd_value(struct pp_hwmgr
*hwmgr
,
1083 uint32_t mclk
, SMIO_Pattern
*smio_pat
)
1085 const struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1086 struct phm_ppt_v1_information
*table_info
=
1087 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
1090 if (SMU7_VOLTAGE_CONTROL_NONE
!= data
->mvdd_control
) {
1091 /* find mvdd value which clock is more than request */
1092 for (i
= 0; i
< table_info
->vdd_dep_on_mclk
->count
; i
++) {
1093 if (mclk
<= table_info
->vdd_dep_on_mclk
->entries
[i
].clk
) {
1094 smio_pat
->Voltage
= data
->mvdd_voltage_table
.entries
[i
].value
;
1098 PP_ASSERT_WITH_CODE(i
< table_info
->vdd_dep_on_mclk
->count
,
1099 "MVDD Voltage is outside the supported range.",
1107 static int vegam_populate_smc_acpi_level(struct pp_hwmgr
*hwmgr
,
1108 SMU75_Discrete_DpmTable
*table
)
1111 uint32_t sclk_frequency
;
1112 const struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1113 struct phm_ppt_v1_information
*table_info
=
1114 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
1115 SMIO_Pattern vol_level
;
1119 table
->ACPILevel
.Flags
&= ~PPSMC_SWSTATE_FLAG_DC
;
1121 /* Get MinVoltage and Frequency from DPM0,
1122 * already converted to SMC_UL */
1123 sclk_frequency
= data
->vbios_boot_state
.sclk_bootup_value
;
1124 result
= vegam_get_dependency_volt_by_clk(hwmgr
,
1125 table_info
->vdd_dep_on_sclk
,
1127 &table
->ACPILevel
.MinVoltage
, &mvdd
);
1128 PP_ASSERT_WITH_CODE(!result
,
1129 "Cannot find ACPI VDDC voltage value "
1130 "in Clock Dependency Table",
1133 result
= vegam_calculate_sclk_params(hwmgr
, sclk_frequency
,
1134 &(table
->ACPILevel
.SclkSetting
));
1135 PP_ASSERT_WITH_CODE(!result
,
1136 "Error retrieving Engine Clock dividers from VBIOS.",
1139 table
->ACPILevel
.DeepSleepDivId
= 0;
1140 table
->ACPILevel
.CcPwrDynRm
= 0;
1141 table
->ACPILevel
.CcPwrDynRm1
= 0;
1143 CONVERT_FROM_HOST_TO_SMC_UL(table
->ACPILevel
.Flags
);
1144 CONVERT_FROM_HOST_TO_SMC_UL(table
->ACPILevel
.MinVoltage
);
1145 CONVERT_FROM_HOST_TO_SMC_UL(table
->ACPILevel
.CcPwrDynRm
);
1146 CONVERT_FROM_HOST_TO_SMC_UL(table
->ACPILevel
.CcPwrDynRm1
);
1148 CONVERT_FROM_HOST_TO_SMC_UL(table
->ACPILevel
.SclkSetting
.SclkFrequency
);
1149 CONVERT_FROM_HOST_TO_SMC_US(table
->ACPILevel
.SclkSetting
.Fcw_int
);
1150 CONVERT_FROM_HOST_TO_SMC_US(table
->ACPILevel
.SclkSetting
.Fcw_frac
);
1151 CONVERT_FROM_HOST_TO_SMC_US(table
->ACPILevel
.SclkSetting
.Pcc_fcw_int
);
1152 CONVERT_FROM_HOST_TO_SMC_US(table
->ACPILevel
.SclkSetting
.Sclk_slew_rate
);
1153 CONVERT_FROM_HOST_TO_SMC_US(table
->ACPILevel
.SclkSetting
.Pcc_up_slew_rate
);
1154 CONVERT_FROM_HOST_TO_SMC_US(table
->ACPILevel
.SclkSetting
.Pcc_down_slew_rate
);
1155 CONVERT_FROM_HOST_TO_SMC_US(table
->ACPILevel
.SclkSetting
.Fcw1_int
);
1156 CONVERT_FROM_HOST_TO_SMC_US(table
->ACPILevel
.SclkSetting
.Fcw1_frac
);
1157 CONVERT_FROM_HOST_TO_SMC_US(table
->ACPILevel
.SclkSetting
.Sclk_ss_slew_rate
);
1160 /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
1161 table
->MemoryACPILevel
.MclkFrequency
= data
->vbios_boot_state
.mclk_bootup_value
;
1162 result
= vegam_get_dependency_volt_by_clk(hwmgr
,
1163 table_info
->vdd_dep_on_mclk
,
1164 table
->MemoryACPILevel
.MclkFrequency
,
1165 &table
->MemoryACPILevel
.MinVoltage
, &mvdd
);
1166 PP_ASSERT_WITH_CODE((0 == result
),
1167 "Cannot find ACPI VDDCI voltage value "
1168 "in Clock Dependency Table",
1172 if ((SMU7_VOLTAGE_CONTROL_NONE
== data
->mvdd_control
) ||
1173 (data
->mclk_dpm_key_disabled
))
1174 us_mvdd
= data
->vbios_boot_state
.mvdd_bootup_value
;
1176 if (!vegam_populate_mvdd_value(hwmgr
,
1177 data
->dpm_table
.mclk_table
.dpm_levels
[0].value
,
1179 us_mvdd
= vol_level
.Voltage
;
1182 if (!vegam_populate_mvdd_value(hwmgr
, 0, &vol_level
))
1183 table
->MemoryACPILevel
.MinMvdd
= PP_HOST_TO_SMC_UL(vol_level
.Voltage
);
1185 table
->MemoryACPILevel
.MinMvdd
= 0;
1187 table
->MemoryACPILevel
.StutterEnable
= false;
1189 table
->MemoryACPILevel
.EnabledForThrottle
= 0;
1190 table
->MemoryACPILevel
.EnabledForActivity
= 0;
1191 table
->MemoryACPILevel
.UpHyst
= 0;
1192 table
->MemoryACPILevel
.DownHyst
= 100;
1193 table
->MemoryACPILevel
.VoltageDownHyst
= 0;
1194 table
->MemoryACPILevel
.ActivityLevel
=
1195 PP_HOST_TO_SMC_US(data
->current_profile_setting
.mclk_activity
);
1197 CONVERT_FROM_HOST_TO_SMC_UL(table
->MemoryACPILevel
.MclkFrequency
);
1198 CONVERT_FROM_HOST_TO_SMC_UL(table
->MemoryACPILevel
.MinVoltage
);
1203 static int vegam_populate_smc_vce_level(struct pp_hwmgr
*hwmgr
,
1204 SMU75_Discrete_DpmTable
*table
)
1206 int result
= -EINVAL
;
1208 struct pp_atomctrl_clock_dividers_vi dividers
;
1209 struct phm_ppt_v1_information
*table_info
=
1210 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
1211 struct phm_ppt_v1_mm_clock_voltage_dependency_table
*mm_table
=
1212 table_info
->mm_dep_table
;
1213 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1216 table
->VceLevelCount
= (uint8_t)(mm_table
->count
);
1217 table
->VceBootLevel
= 0;
1219 for (count
= 0; count
< table
->VceLevelCount
; count
++) {
1220 table
->VceLevel
[count
].Frequency
= mm_table
->entries
[count
].eclk
;
1221 table
->VceLevel
[count
].MinVoltage
= 0;
1222 table
->VceLevel
[count
].MinVoltage
|=
1223 (mm_table
->entries
[count
].vddc
* VOLTAGE_SCALE
) << VDDC_SHIFT
;
1225 if (SMU7_VOLTAGE_CONTROL_BY_GPIO
== data
->vddci_control
)
1226 vddci
= (uint32_t)phm_find_closest_vddci(&(data
->vddci_voltage_table
),
1227 mm_table
->entries
[count
].vddc
- VDDC_VDDCI_DELTA
);
1228 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2
== data
->vddci_control
)
1229 vddci
= mm_table
->entries
[count
].vddc
- VDDC_VDDCI_DELTA
;
1231 vddci
= (data
->vbios_boot_state
.vddci_bootup_value
* VOLTAGE_SCALE
) << VDDCI_SHIFT
;
1234 table
->VceLevel
[count
].MinVoltage
|=
1235 (vddci
* VOLTAGE_SCALE
) << VDDCI_SHIFT
;
1236 table
->VceLevel
[count
].MinVoltage
|= 1 << PHASES_SHIFT
;
1238 /*retrieve divider value for VBIOS */
1239 result
= atomctrl_get_dfs_pll_dividers_vi(hwmgr
,
1240 table
->VceLevel
[count
].Frequency
, ÷rs
);
1241 PP_ASSERT_WITH_CODE((0 == result
),
1242 "can not find divide id for VCE engine clock",
1245 table
->VceLevel
[count
].Divider
= (uint8_t)dividers
.pll_post_divider
;
1247 CONVERT_FROM_HOST_TO_SMC_UL(table
->VceLevel
[count
].Frequency
);
1248 CONVERT_FROM_HOST_TO_SMC_UL(table
->VceLevel
[count
].MinVoltage
);
1253 static int vegam_populate_memory_timing_parameters(struct pp_hwmgr
*hwmgr
,
1254 int32_t eng_clock
, int32_t mem_clock
,
1255 SMU75_Discrete_MCArbDramTimingTableEntry
*arb_regs
)
1257 uint32_t dram_timing
;
1258 uint32_t dram_timing2
;
1259 uint32_t burst_time
;
1265 result
= atomctrl_set_engine_dram_timings_rv770(hwmgr
,
1266 eng_clock
, mem_clock
);
1267 PP_ASSERT_WITH_CODE(result
== 0,
1268 "Error calling VBIOS to set DRAM_TIMING.",
1271 dram_timing
= cgs_read_register(hwmgr
->device
, mmMC_ARB_DRAM_TIMING
);
1272 dram_timing2
= cgs_read_register(hwmgr
->device
, mmMC_ARB_DRAM_TIMING2
);
1273 burst_time
= cgs_read_register(hwmgr
->device
, mmMC_ARB_BURST_TIME
);
1274 rfsh_rate
= cgs_read_register(hwmgr
->device
, mmMC_ARB_RFSH_RATE
);
1275 misc3
= cgs_read_register(hwmgr
->device
, mmMC_ARB_MISC3
);
1277 arb_regs
->McArbDramTiming
= PP_HOST_TO_SMC_UL(dram_timing
);
1278 arb_regs
->McArbDramTiming2
= PP_HOST_TO_SMC_UL(dram_timing2
);
1279 arb_regs
->McArbBurstTime
= PP_HOST_TO_SMC_UL(burst_time
);
1280 arb_regs
->McArbRfshRate
= PP_HOST_TO_SMC_UL(rfsh_rate
);
1281 arb_regs
->McArbMisc3
= PP_HOST_TO_SMC_UL(misc3
);
1286 static int vegam_program_memory_timing_parameters(struct pp_hwmgr
*hwmgr
)
1288 struct smu7_hwmgr
*hw_data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1289 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1290 struct SMU75_Discrete_MCArbDramTimingTable arb_regs
;
1294 memset(&arb_regs
, 0, sizeof(SMU75_Discrete_MCArbDramTimingTable
));
1296 for (i
= 0; i
< hw_data
->dpm_table
.sclk_table
.count
; i
++) {
1297 for (j
= 0; j
< hw_data
->dpm_table
.mclk_table
.count
; j
++) {
1298 result
= vegam_populate_memory_timing_parameters(hwmgr
,
1299 hw_data
->dpm_table
.sclk_table
.dpm_levels
[i
].value
,
1300 hw_data
->dpm_table
.mclk_table
.dpm_levels
[j
].value
,
1301 &arb_regs
.entries
[i
][j
]);
1307 result
= smu7_copy_bytes_to_smc(
1309 smu_data
->smu7_data
.arb_table_start
,
1310 (uint8_t *)&arb_regs
,
1311 sizeof(SMU75_Discrete_MCArbDramTimingTable
),
1316 static int vegam_populate_smc_uvd_level(struct pp_hwmgr
*hwmgr
,
1317 struct SMU75_Discrete_DpmTable
*table
)
1319 int result
= -EINVAL
;
1321 struct pp_atomctrl_clock_dividers_vi dividers
;
1322 struct phm_ppt_v1_information
*table_info
=
1323 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
1324 struct phm_ppt_v1_mm_clock_voltage_dependency_table
*mm_table
=
1325 table_info
->mm_dep_table
;
1326 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1329 table
->UvdLevelCount
= (uint8_t)(mm_table
->count
);
1330 table
->UvdBootLevel
= 0;
1332 for (count
= 0; count
< table
->UvdLevelCount
; count
++) {
1333 table
->UvdLevel
[count
].MinVoltage
= 0;
1334 table
->UvdLevel
[count
].VclkFrequency
= mm_table
->entries
[count
].vclk
;
1335 table
->UvdLevel
[count
].DclkFrequency
= mm_table
->entries
[count
].dclk
;
1336 table
->UvdLevel
[count
].MinVoltage
|=
1337 (mm_table
->entries
[count
].vddc
* VOLTAGE_SCALE
) << VDDC_SHIFT
;
1339 if (SMU7_VOLTAGE_CONTROL_BY_GPIO
== data
->vddci_control
)
1340 vddci
= (uint32_t)phm_find_closest_vddci(&(data
->vddci_voltage_table
),
1341 mm_table
->entries
[count
].vddc
- VDDC_VDDCI_DELTA
);
1342 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2
== data
->vddci_control
)
1343 vddci
= mm_table
->entries
[count
].vddc
- VDDC_VDDCI_DELTA
;
1345 vddci
= (data
->vbios_boot_state
.vddci_bootup_value
* VOLTAGE_SCALE
) << VDDCI_SHIFT
;
1347 table
->UvdLevel
[count
].MinVoltage
|= (vddci
* VOLTAGE_SCALE
) << VDDCI_SHIFT
;
1348 table
->UvdLevel
[count
].MinVoltage
|= 1 << PHASES_SHIFT
;
1350 /* retrieve divider value for VBIOS */
1351 result
= atomctrl_get_dfs_pll_dividers_vi(hwmgr
,
1352 table
->UvdLevel
[count
].VclkFrequency
, ÷rs
);
1353 PP_ASSERT_WITH_CODE((0 == result
),
1354 "can not find divide id for Vclk clock", return result
);
1356 table
->UvdLevel
[count
].VclkDivider
= (uint8_t)dividers
.pll_post_divider
;
1358 result
= atomctrl_get_dfs_pll_dividers_vi(hwmgr
,
1359 table
->UvdLevel
[count
].DclkFrequency
, ÷rs
);
1360 PP_ASSERT_WITH_CODE((0 == result
),
1361 "can not find divide id for Dclk clock", return result
);
1363 table
->UvdLevel
[count
].DclkDivider
= (uint8_t)dividers
.pll_post_divider
;
1365 CONVERT_FROM_HOST_TO_SMC_UL(table
->UvdLevel
[count
].VclkFrequency
);
1366 CONVERT_FROM_HOST_TO_SMC_UL(table
->UvdLevel
[count
].DclkFrequency
);
1367 CONVERT_FROM_HOST_TO_SMC_UL(table
->UvdLevel
[count
].MinVoltage
);
1373 static int vegam_populate_smc_boot_level(struct pp_hwmgr
*hwmgr
,
1374 struct SMU75_Discrete_DpmTable
*table
)
1377 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1379 table
->GraphicsBootLevel
= 0;
1380 table
->MemoryBootLevel
= 0;
1382 /* find boot level from dpm table */
1383 result
= phm_find_boot_level(&(data
->dpm_table
.sclk_table
),
1384 data
->vbios_boot_state
.sclk_bootup_value
,
1385 (uint32_t *)&(table
->GraphicsBootLevel
));
1387 result
= phm_find_boot_level(&(data
->dpm_table
.mclk_table
),
1388 data
->vbios_boot_state
.mclk_bootup_value
,
1389 (uint32_t *)&(table
->MemoryBootLevel
));
1391 table
->BootVddc
= data
->vbios_boot_state
.vddc_bootup_value
*
1393 table
->BootVddci
= data
->vbios_boot_state
.vddci_bootup_value
*
1395 table
->BootMVdd
= data
->vbios_boot_state
.mvdd_bootup_value
*
1398 CONVERT_FROM_HOST_TO_SMC_US(table
->BootVddc
);
1399 CONVERT_FROM_HOST_TO_SMC_US(table
->BootVddci
);
1400 CONVERT_FROM_HOST_TO_SMC_US(table
->BootMVdd
);
1405 static int vegam_populate_smc_initial_state(struct pp_hwmgr
*hwmgr
)
1407 struct smu7_hwmgr
*hw_data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1408 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1409 struct phm_ppt_v1_information
*table_info
=
1410 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
1411 uint8_t count
, level
;
1413 count
= (uint8_t)(table_info
->vdd_dep_on_sclk
->count
);
1415 for (level
= 0; level
< count
; level
++) {
1416 if (table_info
->vdd_dep_on_sclk
->entries
[level
].clk
>=
1417 hw_data
->vbios_boot_state
.sclk_bootup_value
) {
1418 smu_data
->smc_state_table
.GraphicsBootLevel
= level
;
1423 count
= (uint8_t)(table_info
->vdd_dep_on_mclk
->count
);
1424 for (level
= 0; level
< count
; level
++) {
1425 if (table_info
->vdd_dep_on_mclk
->entries
[level
].clk
>=
1426 hw_data
->vbios_boot_state
.mclk_bootup_value
) {
1427 smu_data
->smc_state_table
.MemoryBootLevel
= level
;
/*
 * Convert a fan-gain percentage into the SMC's 4.12 fixed-point format
 * (value * 4096 / 100).  Widened to 32 bits before dividing so the
 * intermediate product cannot be truncated.
 */
static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
{
	return (uint16_t)((uint32_t)raw_setting * 4096 / 100);
}
1442 static int vegam_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr
*hwmgr
)
1444 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1446 const struct vegam_pt_defaults
*defaults
= smu_data
->power_tune_defaults
;
1447 SMU75_Discrete_DpmTable
*table
= &(smu_data
->smc_state_table
);
1448 struct phm_ppt_v1_information
*table_info
=
1449 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
1450 struct phm_cac_tdp_table
*cac_dtp_table
= table_info
->cac_dtp_table
;
1451 struct pp_advance_fan_control_parameters
*fan_table
=
1452 &hwmgr
->thermal_controller
.advanceFanControlParameters
;
1454 const uint16_t *pdef1
;
1455 const uint16_t *pdef2
;
1457 table
->DefaultTdp
= PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table
->usTDP
* 128));
1458 table
->TargetTdp
= PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table
->usTDP
* 128));
1460 PP_ASSERT_WITH_CODE(cac_dtp_table
->usTargetOperatingTemp
<= 255,
1461 "Target Operating Temp is out of Range!",
1464 table
->TemperatureLimitEdge
= PP_HOST_TO_SMC_US(
1465 cac_dtp_table
->usTargetOperatingTemp
* 256);
1466 table
->TemperatureLimitHotspot
= PP_HOST_TO_SMC_US(
1467 cac_dtp_table
->usTemperatureLimitHotspot
* 256);
1468 table
->FanGainEdge
= PP_HOST_TO_SMC_US(
1469 scale_fan_gain_settings(fan_table
->usFanGainEdge
));
1470 table
->FanGainHotspot
= PP_HOST_TO_SMC_US(
1471 scale_fan_gain_settings(fan_table
->usFanGainHotspot
));
1473 pdef1
= defaults
->BAPMTI_R
;
1474 pdef2
= defaults
->BAPMTI_RC
;
1476 for (i
= 0; i
< SMU75_DTE_ITERATIONS
; i
++) {
1477 for (j
= 0; j
< SMU75_DTE_SOURCES
; j
++) {
1478 for (k
= 0; k
< SMU75_DTE_SINKS
; k
++) {
1479 table
->BAPMTI_R
[i
][j
][k
] = PP_HOST_TO_SMC_US(*pdef1
);
1480 table
->BAPMTI_RC
[i
][j
][k
] = PP_HOST_TO_SMC_US(*pdef2
);
1490 static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr
*hwmgr
)
1492 uint32_t ro
, efuse
, volt_without_cks
, volt_with_cks
, value
, max
, min
;
1493 struct vegam_smumgr
*smu_data
=
1494 (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1496 uint8_t i
, stretch_amount
, stretch_amount2
, volt_offset
= 0;
1497 struct phm_ppt_v1_information
*table_info
=
1498 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
1499 struct phm_ppt_v1_clock_voltage_dependency_table
*sclk_table
=
1500 table_info
->vdd_dep_on_sclk
;
1501 uint32_t mask
= (1 << ((STRAP_ASIC_RO_MSB
- STRAP_ASIC_RO_LSB
) + 1)) - 1;
1503 stretch_amount
= (uint8_t)table_info
->cac_dtp_table
->usClockStretchAmount
;
1505 atomctrl_read_efuse(hwmgr
, STRAP_ASIC_RO_LSB
, STRAP_ASIC_RO_MSB
,
1511 ro
= efuse
* (max
- min
) / 255 + min
;
1513 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1514 for (i
= 0; i
< sclk_table
->count
; i
++) {
1515 smu_data
->smc_state_table
.Sclk_CKS_masterEn0_7
|=
1516 sclk_table
->entries
[i
].cks_enable
<< i
;
1517 volt_without_cks
= (uint32_t)((2753594000U + (sclk_table
->entries
[i
].clk
/100) *
1518 136418 - (ro
- 70) * 1000000) /
1519 (2424180 - (sclk_table
->entries
[i
].clk
/100) * 1132925/1000));
1520 volt_with_cks
= (uint32_t)((2797202000U + sclk_table
->entries
[i
].clk
/100 *
1521 3232 - (ro
- 65) * 1000000) /
1522 (2522480 - sclk_table
->entries
[i
].clk
/100 * 115764/100));
1524 if (volt_without_cks
>= volt_with_cks
)
1525 volt_offset
= (uint8_t)(((volt_without_cks
- volt_with_cks
+
1526 sclk_table
->entries
[i
].cks_voffset
) * 100 + 624) / 625);
1528 smu_data
->smc_state_table
.Sclk_voltageOffset
[i
] = volt_offset
;
1531 smu_data
->smc_state_table
.LdoRefSel
=
1532 (table_info
->cac_dtp_table
->ucCKS_LDO_REFSEL
!= 0) ?
1533 table_info
->cac_dtp_table
->ucCKS_LDO_REFSEL
: 5;
1534 /* Populate CKS Lookup Table */
1535 if (stretch_amount
== 1 || stretch_amount
== 2 || stretch_amount
== 5)
1536 stretch_amount2
= 0;
1537 else if (stretch_amount
== 3 || stretch_amount
== 4)
1538 stretch_amount2
= 1;
1540 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
1541 PHM_PlatformCaps_ClockStretcher
);
1542 PP_ASSERT_WITH_CODE(false,
1543 "Stretch Amount in PPTable not supported\n",
1547 value
= cgs_read_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
, ixPWR_CKS_CNTL
);
1548 value
&= 0xFFFFFFFE;
1549 cgs_write_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
, ixPWR_CKS_CNTL
, value
);
1554 static bool vegam_is_hw_avfs_present(struct pp_hwmgr
*hwmgr
)
1558 efuse
= cgs_read_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
,
1559 ixSMU_EFUSE_0
+ (49 * 4));
1560 efuse
&= 0x00000001;
1568 static int vegam_populate_avfs_parameters(struct pp_hwmgr
*hwmgr
)
1570 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1571 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1573 SMU75_Discrete_DpmTable
*table
= &(smu_data
->smc_state_table
);
1575 struct pp_atom_ctrl__avfs_parameters avfs_params
= {0};
1576 AVFS_meanNsigma_t AVFS_meanNsigma
= { {0} };
1577 AVFS_Sclk_Offset_t AVFS_SclkOffset
= { {0} };
1580 struct phm_ppt_v1_information
*table_info
=
1581 (struct phm_ppt_v1_information
*)hwmgr
->pptable
;
1582 struct phm_ppt_v1_clock_voltage_dependency_table
*sclk_table
=
1583 table_info
->vdd_dep_on_sclk
;
1585 if (!hwmgr
->avfs_supported
)
1588 result
= atomctrl_get_avfs_information(hwmgr
, &avfs_params
);
1591 table
->BTCGB_VDROOP_TABLE
[0].a0
=
1592 PP_HOST_TO_SMC_UL(avfs_params
.ulGB_VDROOP_TABLE_CKSON_a0
);
1593 table
->BTCGB_VDROOP_TABLE
[0].a1
=
1594 PP_HOST_TO_SMC_UL(avfs_params
.ulGB_VDROOP_TABLE_CKSON_a1
);
1595 table
->BTCGB_VDROOP_TABLE
[0].a2
=
1596 PP_HOST_TO_SMC_UL(avfs_params
.ulGB_VDROOP_TABLE_CKSON_a2
);
1597 table
->BTCGB_VDROOP_TABLE
[1].a0
=
1598 PP_HOST_TO_SMC_UL(avfs_params
.ulGB_VDROOP_TABLE_CKSOFF_a0
);
1599 table
->BTCGB_VDROOP_TABLE
[1].a1
=
1600 PP_HOST_TO_SMC_UL(avfs_params
.ulGB_VDROOP_TABLE_CKSOFF_a1
);
1601 table
->BTCGB_VDROOP_TABLE
[1].a2
=
1602 PP_HOST_TO_SMC_UL(avfs_params
.ulGB_VDROOP_TABLE_CKSOFF_a2
);
1603 table
->AVFSGB_FUSE_TABLE
[0].m1
=
1604 PP_HOST_TO_SMC_UL(avfs_params
.ulAVFSGB_FUSE_TABLE_CKSON_m1
);
1605 table
->AVFSGB_FUSE_TABLE
[0].m2
=
1606 PP_HOST_TO_SMC_US(avfs_params
.usAVFSGB_FUSE_TABLE_CKSON_m2
);
1607 table
->AVFSGB_FUSE_TABLE
[0].b
=
1608 PP_HOST_TO_SMC_UL(avfs_params
.ulAVFSGB_FUSE_TABLE_CKSON_b
);
1609 table
->AVFSGB_FUSE_TABLE
[0].m1_shift
= 24;
1610 table
->AVFSGB_FUSE_TABLE
[0].m2_shift
= 12;
1611 table
->AVFSGB_FUSE_TABLE
[1].m1
=
1612 PP_HOST_TO_SMC_UL(avfs_params
.ulAVFSGB_FUSE_TABLE_CKSOFF_m1
);
1613 table
->AVFSGB_FUSE_TABLE
[1].m2
=
1614 PP_HOST_TO_SMC_US(avfs_params
.usAVFSGB_FUSE_TABLE_CKSOFF_m2
);
1615 table
->AVFSGB_FUSE_TABLE
[1].b
=
1616 PP_HOST_TO_SMC_UL(avfs_params
.ulAVFSGB_FUSE_TABLE_CKSOFF_b
);
1617 table
->AVFSGB_FUSE_TABLE
[1].m1_shift
= 24;
1618 table
->AVFSGB_FUSE_TABLE
[1].m2_shift
= 12;
1619 table
->MaxVoltage
= PP_HOST_TO_SMC_US(avfs_params
.usMaxVoltage_0_25mv
);
1620 AVFS_meanNsigma
.Aconstant
[0] =
1621 PP_HOST_TO_SMC_UL(avfs_params
.ulAVFS_meanNsigma_Acontant0
);
1622 AVFS_meanNsigma
.Aconstant
[1] =
1623 PP_HOST_TO_SMC_UL(avfs_params
.ulAVFS_meanNsigma_Acontant1
);
1624 AVFS_meanNsigma
.Aconstant
[2] =
1625 PP_HOST_TO_SMC_UL(avfs_params
.ulAVFS_meanNsigma_Acontant2
);
1626 AVFS_meanNsigma
.DC_tol_sigma
=
1627 PP_HOST_TO_SMC_US(avfs_params
.usAVFS_meanNsigma_DC_tol_sigma
);
1628 AVFS_meanNsigma
.Platform_mean
=
1629 PP_HOST_TO_SMC_US(avfs_params
.usAVFS_meanNsigma_Platform_mean
);
1630 AVFS_meanNsigma
.PSM_Age_CompFactor
=
1631 PP_HOST_TO_SMC_US(avfs_params
.usPSM_Age_ComFactor
);
1632 AVFS_meanNsigma
.Platform_sigma
=
1633 PP_HOST_TO_SMC_US(avfs_params
.usAVFS_meanNsigma_Platform_sigma
);
1635 for (i
= 0; i
< sclk_table
->count
; i
++) {
1636 AVFS_meanNsigma
.Static_Voltage_Offset
[i
] =
1637 (uint8_t)(sclk_table
->entries
[i
].cks_voffset
* 100 / 625);
1638 AVFS_SclkOffset
.Sclk_Offset
[i
] =
1639 PP_HOST_TO_SMC_US((uint16_t)
1640 (sclk_table
->entries
[i
].sclk_offset
) / 100);
1643 result
= smu7_read_smc_sram_dword(hwmgr
,
1644 SMU7_FIRMWARE_HEADER_LOCATION
+
1645 offsetof(SMU75_Firmware_Header
, AvfsMeanNSigma
),
1647 smu7_copy_bytes_to_smc(hwmgr
,
1649 (uint8_t *)&AVFS_meanNsigma
,
1650 sizeof(AVFS_meanNsigma_t
),
1653 result
= smu7_read_smc_sram_dword(hwmgr
,
1654 SMU7_FIRMWARE_HEADER_LOCATION
+
1655 offsetof(SMU75_Firmware_Header
, AvfsSclkOffsetTable
),
1657 smu7_copy_bytes_to_smc(hwmgr
,
1659 (uint8_t *)&AVFS_SclkOffset
,
1660 sizeof(AVFS_Sclk_Offset_t
),
1663 data
->avfs_vdroop_override_setting
=
1664 (avfs_params
.ucEnableGB_VDROOP_TABLE_CKSON
<< BTCGB0_Vdroop_Enable_SHIFT
) |
1665 (avfs_params
.ucEnableGB_VDROOP_TABLE_CKSOFF
<< BTCGB1_Vdroop_Enable_SHIFT
) |
1666 (avfs_params
.ucEnableGB_FUSE_TABLE_CKSON
<< AVFSGB0_Vdroop_Enable_SHIFT
) |
1667 (avfs_params
.ucEnableGB_FUSE_TABLE_CKSOFF
<< AVFSGB1_Vdroop_Enable_SHIFT
);
1668 data
->apply_avfs_cks_off_voltage
=
1669 (avfs_params
.ucEnableApplyAVFS_CKS_OFF_Voltage
== 1) ? true : false;
1674 static int vegam_populate_vr_config(struct pp_hwmgr
*hwmgr
,
1675 struct SMU75_Discrete_DpmTable
*table
)
1677 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1678 struct vegam_smumgr
*smu_data
=
1679 (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1682 config
= VR_MERGED_WITH_VDDC
;
1683 table
->VRConfig
|= (config
<< VRCONF_VDDGFX_SHIFT
);
1685 /* Set Vddc Voltage Controller */
1686 if (SMU7_VOLTAGE_CONTROL_BY_SVID2
== data
->voltage_control
) {
1687 config
= VR_SVI2_PLANE_1
;
1688 table
->VRConfig
|= config
;
1690 PP_ASSERT_WITH_CODE(false,
1691 "VDDC should be on SVI2 control in merged mode!",
1694 /* Set Vddci Voltage Controller */
1695 if (SMU7_VOLTAGE_CONTROL_BY_SVID2
== data
->vddci_control
) {
1696 config
= VR_SVI2_PLANE_2
; /* only in merged mode */
1697 table
->VRConfig
|= (config
<< VRCONF_VDDCI_SHIFT
);
1698 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO
== data
->vddci_control
) {
1699 config
= VR_SMIO_PATTERN_1
;
1700 table
->VRConfig
|= (config
<< VRCONF_VDDCI_SHIFT
);
1702 config
= VR_STATIC_VOLTAGE
;
1703 table
->VRConfig
|= (config
<< VRCONF_VDDCI_SHIFT
);
1705 /* Set Mvdd Voltage Controller */
1706 if (SMU7_VOLTAGE_CONTROL_BY_SVID2
== data
->mvdd_control
) {
1707 if (config
!= VR_SVI2_PLANE_2
) {
1708 config
= VR_SVI2_PLANE_2
;
1709 table
->VRConfig
|= (config
<< VRCONF_MVDD_SHIFT
);
1710 cgs_write_ind_register(hwmgr
->device
,
1712 smu_data
->smu7_data
.soft_regs_start
+
1713 offsetof(SMU75_SoftRegisters
, AllowMvddSwitch
),
1716 PP_ASSERT_WITH_CODE(false,
1717 "SVI2 Plane 2 is already taken, set MVDD as Static",);
1718 config
= VR_STATIC_VOLTAGE
;
1719 table
->VRConfig
= (config
<< VRCONF_MVDD_SHIFT
);
1721 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO
== data
->mvdd_control
) {
1722 config
= VR_SMIO_PATTERN_2
;
1723 table
->VRConfig
= (config
<< VRCONF_MVDD_SHIFT
);
1724 cgs_write_ind_register(hwmgr
->device
,
1726 smu_data
->smu7_data
.soft_regs_start
+
1727 offsetof(SMU75_SoftRegisters
, AllowMvddSwitch
),
1730 config
= VR_STATIC_VOLTAGE
;
1731 table
->VRConfig
|= (config
<< VRCONF_MVDD_SHIFT
);
1737 static int vegam_populate_svi_load_line(struct pp_hwmgr
*hwmgr
)
1739 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1740 const struct vegam_pt_defaults
*defaults
= smu_data
->power_tune_defaults
;
1742 smu_data
->power_tune_table
.SviLoadLineEn
= defaults
->SviLoadLineEn
;
1743 smu_data
->power_tune_table
.SviLoadLineVddC
= defaults
->SviLoadLineVddC
;
1744 smu_data
->power_tune_table
.SviLoadLineTrimVddC
= 3;
1745 smu_data
->power_tune_table
.SviLoadLineOffsetVddC
= 0;
1750 static int vegam_populate_tdc_limit(struct pp_hwmgr
*hwmgr
)
1753 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1754 struct phm_ppt_v1_information
*table_info
=
1755 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
1756 const struct vegam_pt_defaults
*defaults
= smu_data
->power_tune_defaults
;
1758 tdc_limit
= (uint16_t)(table_info
->cac_dtp_table
->usTDC
* 128);
1759 smu_data
->power_tune_table
.TDC_VDDC_PkgLimit
=
1760 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit
);
1761 smu_data
->power_tune_table
.TDC_VDDC_ThrottleReleaseLimitPerc
=
1762 defaults
->TDC_VDDC_ThrottleReleaseLimitPerc
;
1763 smu_data
->power_tune_table
.TDC_MAWt
= defaults
->TDC_MAWt
;
1768 static int vegam_populate_dw8(struct pp_hwmgr
*hwmgr
, uint32_t fuse_table_offset
)
1770 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1771 const struct vegam_pt_defaults
*defaults
= smu_data
->power_tune_defaults
;
1774 if (smu7_read_smc_sram_dword(hwmgr
,
1776 offsetof(SMU75_Discrete_PmFuses
, TdcWaterfallCtl
),
1777 (uint32_t *)&temp
, SMC_RAM_END
))
1778 PP_ASSERT_WITH_CODE(false,
1779 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
1782 smu_data
->power_tune_table
.TdcWaterfallCtl
= defaults
->TdcWaterfallCtl
;
1783 smu_data
->power_tune_table
.LPMLTemperatureMin
=
1784 (uint8_t)((temp
>> 16) & 0xff);
1785 smu_data
->power_tune_table
.LPMLTemperatureMax
=
1786 (uint8_t)((temp
>> 8) & 0xff);
1787 smu_data
->power_tune_table
.Reserved
= (uint8_t)(temp
& 0xff);
1792 static int vegam_populate_temperature_scaler(struct pp_hwmgr
*hwmgr
)
1795 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1797 /* Currently not used. Set all to zero. */
1798 for (i
= 0; i
< 16; i
++)
1799 smu_data
->power_tune_table
.LPMLTemperatureScaler
[i
] = 0;
1804 static int vegam_populate_fuzzy_fan(struct pp_hwmgr
*hwmgr
)
1806 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1808 /* TO DO move to hwmgr */
1809 if ((hwmgr
->thermal_controller
.advanceFanControlParameters
.usFanOutputSensitivity
& (1 << 15))
1810 || 0 == hwmgr
->thermal_controller
.advanceFanControlParameters
.usFanOutputSensitivity
)
1811 hwmgr
->thermal_controller
.advanceFanControlParameters
.usFanOutputSensitivity
=
1812 hwmgr
->thermal_controller
.advanceFanControlParameters
.usDefaultFanOutputSensitivity
;
1814 smu_data
->power_tune_table
.FuzzyFan_PwmSetDelta
= PP_HOST_TO_SMC_US(
1815 hwmgr
->thermal_controller
.advanceFanControlParameters
.usFanOutputSensitivity
);
1819 static int vegam_populate_gnb_lpml(struct pp_hwmgr
*hwmgr
)
1822 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1824 /* Currently not used. Set all to zero. */
1825 for (i
= 0; i
< 16; i
++)
1826 smu_data
->power_tune_table
.GnbLPML
[i
] = 0;
1831 static int vegam_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr
*hwmgr
)
1833 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1834 struct phm_ppt_v1_information
*table_info
=
1835 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
1836 uint16_t hi_sidd
= smu_data
->power_tune_table
.BapmVddCBaseLeakageHiSidd
;
1837 uint16_t lo_sidd
= smu_data
->power_tune_table
.BapmVddCBaseLeakageLoSidd
;
1838 struct phm_cac_tdp_table
*cac_table
= table_info
->cac_dtp_table
;
1840 hi_sidd
= (uint16_t)(cac_table
->usHighCACLeakage
/ 100 * 256);
1841 lo_sidd
= (uint16_t)(cac_table
->usLowCACLeakage
/ 100 * 256);
1843 smu_data
->power_tune_table
.BapmVddCBaseLeakageHiSidd
=
1844 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd
);
1845 smu_data
->power_tune_table
.BapmVddCBaseLeakageLoSidd
=
1846 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd
);
1851 static int vegam_populate_pm_fuses(struct pp_hwmgr
*hwmgr
)
1853 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1854 uint32_t pm_fuse_table_offset
;
1856 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
1857 PHM_PlatformCaps_PowerContainment
)) {
1858 if (smu7_read_smc_sram_dword(hwmgr
,
1859 SMU7_FIRMWARE_HEADER_LOCATION
+
1860 offsetof(SMU75_Firmware_Header
, PmFuseTable
),
1861 &pm_fuse_table_offset
, SMC_RAM_END
))
1862 PP_ASSERT_WITH_CODE(false,
1863 "Attempt to get pm_fuse_table_offset Failed!",
1866 if (vegam_populate_svi_load_line(hwmgr
))
1867 PP_ASSERT_WITH_CODE(false,
1868 "Attempt to populate SviLoadLine Failed!",
1871 if (vegam_populate_tdc_limit(hwmgr
))
1872 PP_ASSERT_WITH_CODE(false,
1873 "Attempt to populate TDCLimit Failed!", return -EINVAL
);
1875 if (vegam_populate_dw8(hwmgr
, pm_fuse_table_offset
))
1876 PP_ASSERT_WITH_CODE(false,
1877 "Attempt to populate TdcWaterfallCtl, "
1878 "LPMLTemperature Min and Max Failed!",
1881 if (0 != vegam_populate_temperature_scaler(hwmgr
))
1882 PP_ASSERT_WITH_CODE(false,
1883 "Attempt to populate LPMLTemperatureScaler Failed!",
1886 if (vegam_populate_fuzzy_fan(hwmgr
))
1887 PP_ASSERT_WITH_CODE(false,
1888 "Attempt to populate Fuzzy Fan Control parameters Failed!",
1891 if (vegam_populate_gnb_lpml(hwmgr
))
1892 PP_ASSERT_WITH_CODE(false,
1893 "Attempt to populate GnbLPML Failed!",
1896 if (vegam_populate_bapm_vddc_base_leakage_sidd(hwmgr
))
1897 PP_ASSERT_WITH_CODE(false,
1898 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
1899 "Sidd Failed!", return -EINVAL
);
1901 if (smu7_copy_bytes_to_smc(hwmgr
, pm_fuse_table_offset
,
1902 (uint8_t *)&smu_data
->power_tune_table
,
1903 (sizeof(struct SMU75_Discrete_PmFuses
) - PMFUSES_AVFSSIZE
),
1905 PP_ASSERT_WITH_CODE(false,
1906 "Attempt to download PmFuseTable Failed!",
1912 static int vegam_enable_reconfig_cus(struct pp_hwmgr
*hwmgr
)
1914 struct amdgpu_device
*adev
= hwmgr
->adev
;
1916 smum_send_msg_to_smc_with_parameter(hwmgr
,
1917 PPSMC_MSG_EnableModeSwitchRLCNotification
,
1918 adev
->gfx
.cu_info
.number
);
1923 static int vegam_init_smc_table(struct pp_hwmgr
*hwmgr
)
1926 struct smu7_hwmgr
*hw_data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1927 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1929 struct phm_ppt_v1_information
*table_info
=
1930 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
1931 struct SMU75_Discrete_DpmTable
*table
= &(smu_data
->smc_state_table
);
1933 struct pp_atomctrl_gpio_pin_assignment gpio_pin
;
1934 struct phm_ppt_v1_gpio_table
*gpio_table
=
1935 (struct phm_ppt_v1_gpio_table
*)table_info
->gpio_table
;
1936 pp_atomctrl_clock_dividers_vi dividers
;
1938 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
1939 PHM_PlatformCaps_AutomaticDCTransition
);
1941 vegam_initialize_power_tune_defaults(hwmgr
);
1943 if (SMU7_VOLTAGE_CONTROL_NONE
!= hw_data
->voltage_control
)
1944 vegam_populate_smc_voltage_tables(hwmgr
, table
);
1946 table
->SystemFlags
= 0;
1947 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
1948 PHM_PlatformCaps_AutomaticDCTransition
))
1949 table
->SystemFlags
|= PPSMC_SYSTEMFLAG_GPIO_DC
;
1951 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
1952 PHM_PlatformCaps_StepVddc
))
1953 table
->SystemFlags
|= PPSMC_SYSTEMFLAG_STEPVDDC
;
1955 if (hw_data
->is_memory_gddr5
)
1956 table
->SystemFlags
|= PPSMC_SYSTEMFLAG_GDDR5
;
1958 if (hw_data
->ulv_supported
&& table_info
->us_ulv_voltage_offset
) {
1959 result
= vegam_populate_ulv_state(hwmgr
, table
);
1960 PP_ASSERT_WITH_CODE(!result
,
1961 "Failed to initialize ULV state!", return result
);
1962 cgs_write_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
,
1963 ixCG_ULV_PARAMETER
, SMU7_CGULVPARAMETER_DFLT
);
1966 result
= vegam_populate_smc_link_level(hwmgr
, table
);
1967 PP_ASSERT_WITH_CODE(!result
,
1968 "Failed to initialize Link Level!", return result
);
1970 result
= vegam_populate_all_graphic_levels(hwmgr
);
1971 PP_ASSERT_WITH_CODE(!result
,
1972 "Failed to initialize Graphics Level!", return result
);
1974 result
= vegam_populate_all_memory_levels(hwmgr
);
1975 PP_ASSERT_WITH_CODE(!result
,
1976 "Failed to initialize Memory Level!", return result
);
1978 result
= vegam_populate_smc_acpi_level(hwmgr
, table
);
1979 PP_ASSERT_WITH_CODE(!result
,
1980 "Failed to initialize ACPI Level!", return result
);
1982 result
= vegam_populate_smc_vce_level(hwmgr
, table
);
1983 PP_ASSERT_WITH_CODE(!result
,
1984 "Failed to initialize VCE Level!", return result
);
1986 /* Since only the initial state is completely set up at this point
1987 * (the other states are just copies of the boot state) we only
1988 * need to populate the ARB settings for the initial state.
1990 result
= vegam_program_memory_timing_parameters(hwmgr
);
1991 PP_ASSERT_WITH_CODE(!result
,
1992 "Failed to Write ARB settings for the initial state.", return result
);
1994 result
= vegam_populate_smc_uvd_level(hwmgr
, table
);
1995 PP_ASSERT_WITH_CODE(!result
,
1996 "Failed to initialize UVD Level!", return result
);
1998 result
= vegam_populate_smc_boot_level(hwmgr
, table
);
1999 PP_ASSERT_WITH_CODE(!result
,
2000 "Failed to initialize Boot Level!", return result
);
2002 result
= vegam_populate_smc_initial_state(hwmgr
);
2003 PP_ASSERT_WITH_CODE(!result
,
2004 "Failed to initialize Boot State!", return result
);
2006 result
= vegam_populate_bapm_parameters_in_dpm_table(hwmgr
);
2007 PP_ASSERT_WITH_CODE(!result
,
2008 "Failed to populate BAPM Parameters!", return result
);
2010 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
2011 PHM_PlatformCaps_ClockStretcher
)) {
2012 result
= vegam_populate_clock_stretcher_data_table(hwmgr
);
2013 PP_ASSERT_WITH_CODE(!result
,
2014 "Failed to populate Clock Stretcher Data Table!",
2018 result
= vegam_populate_avfs_parameters(hwmgr
);
2019 PP_ASSERT_WITH_CODE(!result
,
2020 "Failed to populate AVFS Parameters!", return result
;);
2022 table
->CurrSclkPllRange
= 0xff;
2023 table
->GraphicsVoltageChangeEnable
= 1;
2024 table
->GraphicsThermThrottleEnable
= 1;
2025 table
->GraphicsInterval
= 1;
2026 table
->VoltageInterval
= 1;
2027 table
->ThermalInterval
= 1;
2028 table
->TemperatureLimitHigh
=
2029 table_info
->cac_dtp_table
->usTargetOperatingTemp
*
2030 SMU7_Q88_FORMAT_CONVERSION_UNIT
;
2031 table
->TemperatureLimitLow
=
2032 (table_info
->cac_dtp_table
->usTargetOperatingTemp
- 1) *
2033 SMU7_Q88_FORMAT_CONVERSION_UNIT
;
2034 table
->MemoryVoltageChangeEnable
= 1;
2035 table
->MemoryInterval
= 1;
2036 table
->VoltageResponseTime
= 0;
2037 table
->PhaseResponseTime
= 0;
2038 table
->MemoryThermThrottleEnable
= 1;
2040 PP_ASSERT_WITH_CODE(hw_data
->dpm_table
.pcie_speed_table
.count
>= 1,
2041 "There must be 1 or more PCIE levels defined in PPTable.",
2043 table
->PCIeBootLinkLevel
=
2044 hw_data
->dpm_table
.pcie_speed_table
.count
;
2045 table
->PCIeGenInterval
= 1;
2046 table
->VRConfig
= 0;
2048 result
= vegam_populate_vr_config(hwmgr
, table
);
2049 PP_ASSERT_WITH_CODE(!result
,
2050 "Failed to populate VRConfig setting!", return result
);
2052 table
->ThermGpio
= 17;
2053 table
->SclkStepSize
= 0x4000;
2055 if (atomctrl_get_pp_assign_pin(hwmgr
,
2056 VDDC_VRHOT_GPIO_PINID
, &gpio_pin
)) {
2057 table
->VRHotGpio
= gpio_pin
.uc_gpio_pin_bit_shift
;
2060 table_info
->gpio_table
->vrhot_triggered_sclk_dpm_index
;
2062 table
->VRHotGpio
= SMU7_UNUSED_GPIO_PIN
;
2063 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
2064 PHM_PlatformCaps_RegulatorHot
);
2067 if (atomctrl_get_pp_assign_pin(hwmgr
,
2068 PP_AC_DC_SWITCH_GPIO_PINID
, &gpio_pin
)) {
2069 table
->AcDcGpio
= gpio_pin
.uc_gpio_pin_bit_shift
;
2070 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
2071 PHM_PlatformCaps_AutomaticDCTransition
) &&
2072 !smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_UseNewGPIOScheme
))
2073 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
2074 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme
);
2076 table
->AcDcGpio
= SMU7_UNUSED_GPIO_PIN
;
2077 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
2078 PHM_PlatformCaps_AutomaticDCTransition
);
2081 /* Thermal Output GPIO */
2082 if (atomctrl_get_pp_assign_pin(hwmgr
,
2083 THERMAL_INT_OUTPUT_GPIO_PINID
, &gpio_pin
)) {
2084 table
->ThermOutGpio
= gpio_pin
.uc_gpio_pin_bit_shift
;
2086 /* For porlarity read GPIOPAD_A with assigned Gpio pin
2087 * since VBIOS will program this register to set 'inactive state',
2088 * driver can then determine 'active state' from this and
2089 * program SMU with correct polarity
2091 table
->ThermOutPolarity
=
2092 (0 == (cgs_read_register(hwmgr
->device
, mmGPIOPAD_A
) &
2093 (1 << gpio_pin
.uc_gpio_pin_bit_shift
))) ? 1:0;
2094 table
->ThermOutMode
= SMU7_THERM_OUT_MODE_THERM_ONLY
;
2096 /* if required, combine VRHot/PCC with thermal out GPIO */
2097 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
2098 PHM_PlatformCaps_RegulatorHot
) &&
2099 phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
2100 PHM_PlatformCaps_CombinePCCWithThermalSignal
))
2101 table
->ThermOutMode
= SMU7_THERM_OUT_MODE_THERM_VRHOT
;
2103 table
->ThermOutGpio
= 17;
2104 table
->ThermOutPolarity
= 1;
2105 table
->ThermOutMode
= SMU7_THERM_OUT_MODE_DISABLE
;
2108 /* Populate BIF_SCLK levels into SMC DPM table */
2109 for (i
= 0; i
<= hw_data
->dpm_table
.pcie_speed_table
.count
; i
++) {
2110 result
= atomctrl_get_dfs_pll_dividers_vi(hwmgr
,
2111 smu_data
->bif_sclk_table
[i
], ÷rs
);
2112 PP_ASSERT_WITH_CODE(!result
,
2113 "Can not find DFS divide id for Sclk",
2117 table
->Ulv
.BifSclkDfs
=
2118 PP_HOST_TO_SMC_US((uint16_t)(dividers
.pll_post_divider
));
2120 table
->LinkLevel
[i
- 1].BifSclkDfs
=
2121 PP_HOST_TO_SMC_US((uint16_t)(dividers
.pll_post_divider
));
2124 for (i
= 0; i
< SMU75_MAX_ENTRIES_SMIO
; i
++)
2125 table
->Smio
[i
] = PP_HOST_TO_SMC_UL(table
->Smio
[i
]);
2127 CONVERT_FROM_HOST_TO_SMC_UL(table
->SystemFlags
);
2128 CONVERT_FROM_HOST_TO_SMC_UL(table
->VRConfig
);
2129 CONVERT_FROM_HOST_TO_SMC_UL(table
->SmioMask1
);
2130 CONVERT_FROM_HOST_TO_SMC_UL(table
->SmioMask2
);
2131 CONVERT_FROM_HOST_TO_SMC_UL(table
->SclkStepSize
);
2132 CONVERT_FROM_HOST_TO_SMC_UL(table
->CurrSclkPllRange
);
2133 CONVERT_FROM_HOST_TO_SMC_US(table
->TemperatureLimitHigh
);
2134 CONVERT_FROM_HOST_TO_SMC_US(table
->TemperatureLimitLow
);
2135 CONVERT_FROM_HOST_TO_SMC_US(table
->VoltageResponseTime
);
2136 CONVERT_FROM_HOST_TO_SMC_US(table
->PhaseResponseTime
);
2138 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
2139 result
= smu7_copy_bytes_to_smc(hwmgr
,
2140 smu_data
->smu7_data
.dpm_table_start
+
2141 offsetof(SMU75_Discrete_DpmTable
, SystemFlags
),
2142 (uint8_t *)&(table
->SystemFlags
),
2143 sizeof(SMU75_Discrete_DpmTable
) - 3 * sizeof(SMU75_PIDController
),
2145 PP_ASSERT_WITH_CODE(!result
,
2146 "Failed to upload dpm data to SMC memory!", return result
);
2148 result
= vegam_populate_pm_fuses(hwmgr
);
2149 PP_ASSERT_WITH_CODE(!result
,
2150 "Failed to populate PM fuses to SMC memory!", return result
);
2152 result
= vegam_enable_reconfig_cus(hwmgr
);
2153 PP_ASSERT_WITH_CODE(!result
,
2154 "Failed to enable reconfigurable CUs!", return result
);
2159 static uint32_t vegam_get_offsetof(uint32_t type
, uint32_t member
)
2162 case SMU_SoftRegisters
:
2164 case HandshakeDisables
:
2165 return offsetof(SMU75_SoftRegisters
, HandshakeDisables
);
2166 case VoltageChangeTimeout
:
2167 return offsetof(SMU75_SoftRegisters
, VoltageChangeTimeout
);
2168 case AverageGraphicsActivity
:
2169 return offsetof(SMU75_SoftRegisters
, AverageGraphicsActivity
);
2171 return offsetof(SMU75_SoftRegisters
, PreVBlankGap
);
2173 return offsetof(SMU75_SoftRegisters
, VBlankTimeout
);
2174 case UcodeLoadStatus
:
2175 return offsetof(SMU75_SoftRegisters
, UcodeLoadStatus
);
2176 case DRAM_LOG_ADDR_H
:
2177 return offsetof(SMU75_SoftRegisters
, DRAM_LOG_ADDR_H
);
2178 case DRAM_LOG_ADDR_L
:
2179 return offsetof(SMU75_SoftRegisters
, DRAM_LOG_ADDR_L
);
2180 case DRAM_LOG_PHY_ADDR_H
:
2181 return offsetof(SMU75_SoftRegisters
, DRAM_LOG_PHY_ADDR_H
);
2182 case DRAM_LOG_PHY_ADDR_L
:
2183 return offsetof(SMU75_SoftRegisters
, DRAM_LOG_PHY_ADDR_L
);
2184 case DRAM_LOG_BUFF_SIZE
:
2185 return offsetof(SMU75_SoftRegisters
, DRAM_LOG_BUFF_SIZE
);
2188 case SMU_Discrete_DpmTable
:
2191 return offsetof(SMU75_Discrete_DpmTable
, UvdBootLevel
);
2193 return offsetof(SMU75_Discrete_DpmTable
, VceBootLevel
);
2194 case LowSclkInterruptThreshold
:
2195 return offsetof(SMU75_Discrete_DpmTable
, LowSclkInterruptThreshold
);
2199 pr_warn("can't get the offset of type %x member %x\n", type
, member
);
2203 static int vegam_program_mem_timing_parameters(struct pp_hwmgr
*hwmgr
)
2205 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
2207 if (data
->need_update_smu7_dpm_table
&
2208 (DPMTABLE_OD_UPDATE_SCLK
+
2209 DPMTABLE_UPDATE_SCLK
+
2210 DPMTABLE_UPDATE_MCLK
))
2211 return vegam_program_memory_timing_parameters(hwmgr
);
2216 static int vegam_update_sclk_threshold(struct pp_hwmgr
*hwmgr
)
2218 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
2219 struct vegam_smumgr
*smu_data
=
2220 (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
2222 uint32_t low_sclk_interrupt_threshold
= 0;
2224 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
2225 PHM_PlatformCaps_SclkThrottleLowNotification
)
2226 && (data
->low_sclk_interrupt_threshold
!= 0)) {
2227 low_sclk_interrupt_threshold
=
2228 data
->low_sclk_interrupt_threshold
;
2230 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold
);
2232 result
= smu7_copy_bytes_to_smc(
2234 smu_data
->smu7_data
.dpm_table_start
+
2235 offsetof(SMU75_Discrete_DpmTable
,
2236 LowSclkInterruptThreshold
),
2237 (uint8_t *)&low_sclk_interrupt_threshold
,
2241 PP_ASSERT_WITH_CODE((result
== 0),
2242 "Failed to update SCLK threshold!", return result
);
2244 result
= vegam_program_mem_timing_parameters(hwmgr
);
2245 PP_ASSERT_WITH_CODE((result
== 0),
2246 "Failed to program memory timing parameters!",
2252 int vegam_thermal_avfs_enable(struct pp_hwmgr
*hwmgr
)
2254 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
2257 if (!hwmgr
->avfs_supported
)
2260 ret
= smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_EnableAvfs
);
2262 if (data
->apply_avfs_cks_off_voltage
)
2263 ret
= smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_ApplyAvfsCksOffVoltage
);
2269 static int vegam_thermal_setup_fan_table(struct pp_hwmgr
*hwmgr
)
2271 PP_ASSERT_WITH_CODE(hwmgr
->thermal_controller
.fanInfo
.bNoFan
,
2272 "VBIOS fan info is not correct!",
2274 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
2275 PHM_PlatformCaps_MicrocodeFanControl
);
2279 const struct pp_smumgr_func vegam_smu_funcs
= {
2280 .smu_init
= vegam_smu_init
,
2281 .smu_fini
= smu7_smu_fini
,
2282 .start_smu
= vegam_start_smu
,
2283 .check_fw_load_finish
= smu7_check_fw_load_finish
,
2284 .request_smu_load_fw
= smu7_reload_firmware
,
2285 .request_smu_load_specific_fw
= NULL
,
2286 .send_msg_to_smc
= smu7_send_msg_to_smc
,
2287 .send_msg_to_smc_with_parameter
= smu7_send_msg_to_smc_with_parameter
,
2288 .process_firmware_header
= vegam_process_firmware_header
,
2289 .is_dpm_running
= vegam_is_dpm_running
,
2290 .get_mac_definition
= vegam_get_mac_definition
,
2291 .update_smc_table
= vegam_update_smc_table
,
2292 .init_smc_table
= vegam_init_smc_table
,
2293 .get_offsetof
= vegam_get_offsetof
,
2294 .populate_all_graphic_levels
= vegam_populate_all_graphic_levels
,
2295 .populate_all_memory_levels
= vegam_populate_all_memory_levels
,
2296 .update_sclk_threshold
= vegam_update_sclk_threshold
,
2297 .is_hw_avfs_present
= vegam_is_hw_avfs_present
,
2298 .thermal_avfs_enable
= vegam_thermal_avfs_enable
,
2299 .thermal_setup_fan_table
= vegam_thermal_setup_fan_table
,