2 * Copyright 2017 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "smu_ucode_xfer_vi.h"
26 #include "vegam_smumgr.h"
27 #include "smu/smu_7_1_3_d.h"
28 #include "smu/smu_7_1_3_sh_mask.h"
29 #include "gmc/gmc_8_1_d.h"
30 #include "gmc/gmc_8_1_sh_mask.h"
31 #include "oss/oss_3_0_d.h"
32 #include "gca/gfx_8_0_d.h"
33 #include "bif/bif_5_0_d.h"
34 #include "bif/bif_5_0_sh_mask.h"
35 #include "ppatomctrl.h"
36 #include "cgs_common.h"
37 #include "smu7_ppsmc.h"
39 #include "smu7_dyn_defaults.h"
41 #include "smu7_hwmgr.h"
42 #include "hardwaremanager.h"
44 #include "pppcielanes.h"
46 #include "dce/dce_11_2_d.h"
47 #include "dce/dce_11_2_sh_mask.h"
/* Default SCLK target activity (percent) used for DPM tuning. */
#define PPVEGAM_TARGETACTIVITY_DFLT                     50

/* VID offset scaling: offset_in_uV * SCALE2 / SCALE1 converts to VID steps. */
#define VOLTAGE_VID_OFFSET_SCALE1   625
#define VOLTAGE_VID_OFFSET_SCALE2   100
/* Number of entries in vegam_power_tune_data_set_array. */
#define POWERTUNE_DEFAULT_SET_MAX    1
/* Fallback VDDC-to-VDDCI delta (mV) when the dependency table has no VDDCI. */
#define VDDC_VDDCI_DELTA            200
#define MC_CG_ARB_FREQ_F1           0x0b

/* Fuse strap bit positions for the ASIC RO (ring-oscillator) value. */
#define STRAP_ASIC_RO_LSB    2168
#define STRAP_ASIC_RO_MSB    2175

/* SMC message IDs not present in the shared smu7_ppsmc.h header. */
#define PPSMC_MSG_ApplyAvfsCksOffVoltage                  ((uint16_t) 0x415)
#define PPSMC_MSG_EnableModeSwitchRLCNotification         ((uint16_t) 0x305)
63 static const struct vegam_pt_defaults
64 vegam_power_tune_data_set_array
[POWERTUNE_DEFAULT_SET_MAX
] = {
65 /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
66 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
67 { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
68 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
69 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
72 static const sclkFcwRange_t Range_Table
[NUM_SCLK_RANGE
] = {
73 {VCO_2_4
, POSTDIV_DIV_BY_16
, 75, 160, 112},
74 {VCO_3_6
, POSTDIV_DIV_BY_16
, 112, 224, 160},
75 {VCO_2_4
, POSTDIV_DIV_BY_8
, 75, 160, 112},
76 {VCO_3_6
, POSTDIV_DIV_BY_8
, 112, 224, 160},
77 {VCO_2_4
, POSTDIV_DIV_BY_4
, 75, 160, 112},
78 {VCO_3_6
, POSTDIV_DIV_BY_4
, 112, 216, 160},
79 {VCO_2_4
, POSTDIV_DIV_BY_2
, 75, 160, 108},
80 {VCO_3_6
, POSTDIV_DIV_BY_2
, 112, 216, 160} };
82 static int vegam_smu_init(struct pp_hwmgr
*hwmgr
)
84 struct vegam_smumgr
*smu_data
;
86 smu_data
= kzalloc(sizeof(struct vegam_smumgr
), GFP_KERNEL
);
90 hwmgr
->smu_backend
= smu_data
;
92 if (smu7_init(hwmgr
)) {
100 static int vegam_start_smu_in_protection_mode(struct pp_hwmgr
*hwmgr
)
104 /* Wait for smc boot up */
105 /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
108 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
109 SMC_SYSCON_RESET_CNTL
, rst_reg
, 1);
111 result
= smu7_upload_smu_firmware_image(hwmgr
);
116 cgs_write_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
, ixSMU_STATUS
, 0);
118 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
119 SMC_SYSCON_CLOCK_CNTL_0
, ck_disable
, 0);
121 /* De-assert reset */
122 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
123 SMC_SYSCON_RESET_CNTL
, rst_reg
, 0);
126 PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr
, SMC_IND
, RCU_UC_EVENTS
, INTERRUPTS_ENABLED
, 1);
129 /* Call Test SMU message with 0x20000 offset to trigger SMU start */
130 smu7_send_msg_to_smc_offset(hwmgr
);
132 /* Wait done bit to be set */
133 /* Check pass/failed indicator */
135 PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr
, SMC_IND
, SMU_STATUS
, SMU_DONE
, 0);
137 if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
138 SMU_STATUS
, SMU_PASS
))
139 PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
141 cgs_write_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
, ixFIRMWARE_FLAGS
, 0);
143 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
144 SMC_SYSCON_RESET_CNTL
, rst_reg
, 1);
146 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
147 SMC_SYSCON_RESET_CNTL
, rst_reg
, 0);
149 /* Wait for firmware to initialize */
150 PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr
, SMC_IND
, FIRMWARE_FLAGS
, INTERRUPTS_ENABLED
, 1);
155 static int vegam_start_smu_in_non_protection_mode(struct pp_hwmgr
*hwmgr
)
159 /* wait for smc boot up */
160 PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr
, SMC_IND
, RCU_UC_EVENTS
, boot_seq_done
, 0);
162 /* Clear firmware interrupt enable flag */
163 /* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
164 cgs_write_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
,
165 ixFIRMWARE_FLAGS
, 0);
167 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
168 SMC_SYSCON_RESET_CNTL
,
171 result
= smu7_upload_smu_firmware_image(hwmgr
);
175 /* Set smc instruct start point at 0x0 */
176 smu7_program_jump_on_start(hwmgr
);
178 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
179 SMC_SYSCON_CLOCK_CNTL_0
, ck_disable
, 0);
181 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
,
182 SMC_SYSCON_RESET_CNTL
, rst_reg
, 0);
184 /* Wait for firmware to initialize */
186 PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr
, SMC_IND
,
187 FIRMWARE_FLAGS
, INTERRUPTS_ENABLED
, 1);
192 static int vegam_start_smu(struct pp_hwmgr
*hwmgr
)
195 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
197 /* Only start SMC if SMC RAM is not running */
198 if (!smu7_is_smc_ram_running(hwmgr
) && hwmgr
->not_vf
) {
199 smu_data
->protected_mode
= (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(hwmgr
->device
,
200 CGS_IND_REG__SMC
, SMU_FIRMWARE
, SMU_MODE
));
201 smu_data
->smu7_data
.security_hard_key
= (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(
202 hwmgr
->device
, CGS_IND_REG__SMC
, SMU_FIRMWARE
, SMU_SEL
));
204 /* Check if SMU is running in protected mode */
205 if (smu_data
->protected_mode
== 0)
206 result
= vegam_start_smu_in_non_protection_mode(hwmgr
);
208 result
= vegam_start_smu_in_protection_mode(hwmgr
);
211 PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result
);
214 /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
215 smu7_read_smc_sram_dword(hwmgr
,
216 SMU7_FIRMWARE_HEADER_LOCATION
+ offsetof(SMU75_Firmware_Header
, SoftRegisters
),
217 &(smu_data
->smu7_data
.soft_regs_start
),
220 result
= smu7_request_smu_load_fw(hwmgr
);
225 static int vegam_process_firmware_header(struct pp_hwmgr
*hwmgr
)
227 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
228 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
233 result
= smu7_read_smc_sram_dword(hwmgr
,
234 SMU7_FIRMWARE_HEADER_LOCATION
+
235 offsetof(SMU75_Firmware_Header
, DpmTable
),
239 smu_data
->smu7_data
.dpm_table_start
= tmp
;
241 error
|= (0 != result
);
243 result
= smu7_read_smc_sram_dword(hwmgr
,
244 SMU7_FIRMWARE_HEADER_LOCATION
+
245 offsetof(SMU75_Firmware_Header
, SoftRegisters
),
249 data
->soft_regs_start
= tmp
;
250 smu_data
->smu7_data
.soft_regs_start
= tmp
;
253 error
|= (0 != result
);
255 result
= smu7_read_smc_sram_dword(hwmgr
,
256 SMU7_FIRMWARE_HEADER_LOCATION
+
257 offsetof(SMU75_Firmware_Header
, mcRegisterTable
),
261 smu_data
->smu7_data
.mc_reg_table_start
= tmp
;
263 result
= smu7_read_smc_sram_dword(hwmgr
,
264 SMU7_FIRMWARE_HEADER_LOCATION
+
265 offsetof(SMU75_Firmware_Header
, FanTable
),
269 smu_data
->smu7_data
.fan_table_start
= tmp
;
271 error
|= (0 != result
);
273 result
= smu7_read_smc_sram_dword(hwmgr
,
274 SMU7_FIRMWARE_HEADER_LOCATION
+
275 offsetof(SMU75_Firmware_Header
, mcArbDramTimingTable
),
279 smu_data
->smu7_data
.arb_table_start
= tmp
;
281 error
|= (0 != result
);
283 result
= smu7_read_smc_sram_dword(hwmgr
,
284 SMU7_FIRMWARE_HEADER_LOCATION
+
285 offsetof(SMU75_Firmware_Header
, Version
),
289 hwmgr
->microcode_version_info
.SMC
= tmp
;
291 error
|= (0 != result
);
293 return error
? -1 : 0;
296 static bool vegam_is_dpm_running(struct pp_hwmgr
*hwmgr
)
298 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr
->device
,
299 CGS_IND_REG__SMC
, FEATURE_STATUS
, VOLTAGE_CONTROLLER_ON
))
303 static uint32_t vegam_get_mac_definition(uint32_t value
)
306 case SMU_MAX_LEVELS_GRAPHICS
:
307 return SMU75_MAX_LEVELS_GRAPHICS
;
308 case SMU_MAX_LEVELS_MEMORY
:
309 return SMU75_MAX_LEVELS_MEMORY
;
310 case SMU_MAX_LEVELS_LINK
:
311 return SMU75_MAX_LEVELS_LINK
;
312 case SMU_MAX_ENTRIES_SMIO
:
313 return SMU75_MAX_ENTRIES_SMIO
;
314 case SMU_MAX_LEVELS_VDDC
:
315 return SMU75_MAX_LEVELS_VDDC
;
316 case SMU_MAX_LEVELS_VDDGFX
:
317 return SMU75_MAX_LEVELS_VDDGFX
;
318 case SMU_MAX_LEVELS_VDDCI
:
319 return SMU75_MAX_LEVELS_VDDCI
;
320 case SMU_MAX_LEVELS_MVDD
:
321 return SMU75_MAX_LEVELS_MVDD
;
322 case SMU_UVD_MCLK_HANDSHAKE_DISABLE
:
323 return SMU7_UVD_MCLK_HANDSHAKE_DISABLE
|
324 SMU7_VCE_MCLK_HANDSHAKE_DISABLE
;
327 pr_warn("can't get the mac of %x\n", value
);
331 static int vegam_update_uvd_smc_table(struct pp_hwmgr
*hwmgr
)
333 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
334 uint32_t mm_boot_level_offset
, mm_boot_level_value
;
335 struct phm_ppt_v1_information
*table_info
=
336 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
338 smu_data
->smc_state_table
.UvdBootLevel
= 0;
339 if (table_info
->mm_dep_table
->count
> 0)
340 smu_data
->smc_state_table
.UvdBootLevel
=
341 (uint8_t) (table_info
->mm_dep_table
->count
- 1);
342 mm_boot_level_offset
= smu_data
->smu7_data
.dpm_table_start
+ offsetof(SMU75_Discrete_DpmTable
,
344 mm_boot_level_offset
/= 4;
345 mm_boot_level_offset
*= 4;
346 mm_boot_level_value
= cgs_read_ind_register(hwmgr
->device
,
347 CGS_IND_REG__SMC
, mm_boot_level_offset
);
348 mm_boot_level_value
&= 0x00FFFFFF;
349 mm_boot_level_value
|= smu_data
->smc_state_table
.UvdBootLevel
<< 24;
350 cgs_write_ind_register(hwmgr
->device
,
351 CGS_IND_REG__SMC
, mm_boot_level_offset
, mm_boot_level_value
);
353 if (!phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
354 PHM_PlatformCaps_UVDDPM
) ||
355 phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
356 PHM_PlatformCaps_StablePState
))
357 smum_send_msg_to_smc_with_parameter(hwmgr
,
358 PPSMC_MSG_UVDDPM_SetEnabledMask
,
359 (uint32_t)(1 << smu_data
->smc_state_table
.UvdBootLevel
));
363 static int vegam_update_vce_smc_table(struct pp_hwmgr
*hwmgr
)
365 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
366 uint32_t mm_boot_level_offset
, mm_boot_level_value
;
367 struct phm_ppt_v1_information
*table_info
=
368 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
370 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
371 PHM_PlatformCaps_StablePState
))
372 smu_data
->smc_state_table
.VceBootLevel
=
373 (uint8_t) (table_info
->mm_dep_table
->count
- 1);
375 smu_data
->smc_state_table
.VceBootLevel
= 0;
377 mm_boot_level_offset
= smu_data
->smu7_data
.dpm_table_start
+
378 offsetof(SMU75_Discrete_DpmTable
, VceBootLevel
);
379 mm_boot_level_offset
/= 4;
380 mm_boot_level_offset
*= 4;
381 mm_boot_level_value
= cgs_read_ind_register(hwmgr
->device
,
382 CGS_IND_REG__SMC
, mm_boot_level_offset
);
383 mm_boot_level_value
&= 0xFF00FFFF;
384 mm_boot_level_value
|= smu_data
->smc_state_table
.VceBootLevel
<< 16;
385 cgs_write_ind_register(hwmgr
->device
,
386 CGS_IND_REG__SMC
, mm_boot_level_offset
, mm_boot_level_value
);
388 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_StablePState
))
389 smum_send_msg_to_smc_with_parameter(hwmgr
,
390 PPSMC_MSG_VCEDPM_SetEnabledMask
,
391 (uint32_t)1 << smu_data
->smc_state_table
.VceBootLevel
);
395 static int vegam_update_bif_smc_table(struct pp_hwmgr
*hwmgr
)
397 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
398 struct phm_ppt_v1_information
*table_info
=
399 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
400 struct phm_ppt_v1_pcie_table
*pcie_table
= table_info
->pcie_table
;
403 max_entry
= (SMU75_MAX_LEVELS_LINK
< pcie_table
->count
) ?
404 SMU75_MAX_LEVELS_LINK
:
406 /* Setup BIF_SCLK levels */
407 for (i
= 0; i
< max_entry
; i
++)
408 smu_data
->bif_sclk_table
[i
] = pcie_table
->entries
[i
].pcie_sclk
;
412 static int vegam_update_smc_table(struct pp_hwmgr
*hwmgr
, uint32_t type
)
416 vegam_update_uvd_smc_table(hwmgr
);
419 vegam_update_vce_smc_table(hwmgr
);
422 vegam_update_bif_smc_table(hwmgr
);
430 static void vegam_initialize_power_tune_defaults(struct pp_hwmgr
*hwmgr
)
432 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
433 struct phm_ppt_v1_information
*table_info
=
434 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
437 table_info
->cac_dtp_table
->usPowerTuneDataSetID
<= POWERTUNE_DEFAULT_SET_MAX
&&
438 table_info
->cac_dtp_table
->usPowerTuneDataSetID
)
439 smu_data
->power_tune_defaults
=
440 &vegam_power_tune_data_set_array
441 [table_info
->cac_dtp_table
->usPowerTuneDataSetID
- 1];
443 smu_data
->power_tune_defaults
= &vegam_power_tune_data_set_array
[0];
447 static int vegam_populate_smc_mvdd_table(struct pp_hwmgr
*hwmgr
,
448 SMU75_Discrete_DpmTable
*table
)
450 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
451 uint32_t count
, level
;
453 if (SMU7_VOLTAGE_CONTROL_BY_GPIO
== data
->mvdd_control
) {
454 count
= data
->mvdd_voltage_table
.count
;
455 if (count
> SMU_MAX_SMIO_LEVELS
)
456 count
= SMU_MAX_SMIO_LEVELS
;
457 for (level
= 0; level
< count
; level
++) {
458 table
->SmioTable2
.Pattern
[level
].Voltage
= PP_HOST_TO_SMC_US(
459 data
->mvdd_voltage_table
.entries
[level
].value
* VOLTAGE_SCALE
);
460 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
461 table
->SmioTable2
.Pattern
[level
].Smio
=
463 table
->Smio
[level
] |=
464 data
->mvdd_voltage_table
.entries
[level
].smio_low
;
466 table
->SmioMask2
= data
->mvdd_voltage_table
.mask_low
;
468 table
->MvddLevelCount
= (uint32_t) PP_HOST_TO_SMC_UL(count
);
474 static int vegam_populate_smc_vddci_table(struct pp_hwmgr
*hwmgr
,
475 struct SMU75_Discrete_DpmTable
*table
)
477 uint32_t count
, level
;
478 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
480 count
= data
->vddci_voltage_table
.count
;
482 if (SMU7_VOLTAGE_CONTROL_BY_GPIO
== data
->vddci_control
) {
483 if (count
> SMU_MAX_SMIO_LEVELS
)
484 count
= SMU_MAX_SMIO_LEVELS
;
485 for (level
= 0; level
< count
; ++level
) {
486 table
->SmioTable1
.Pattern
[level
].Voltage
= PP_HOST_TO_SMC_US(
487 data
->vddci_voltage_table
.entries
[level
].value
* VOLTAGE_SCALE
);
488 table
->SmioTable1
.Pattern
[level
].Smio
= (uint8_t) level
;
490 table
->Smio
[level
] |= data
->vddci_voltage_table
.entries
[level
].smio_low
;
494 table
->SmioMask1
= data
->vddci_voltage_table
.mask_low
;
499 static int vegam_populate_cac_table(struct pp_hwmgr
*hwmgr
,
500 struct SMU75_Discrete_DpmTable
*table
)
504 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
505 struct phm_ppt_v1_information
*table_info
=
506 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
507 struct phm_ppt_v1_voltage_lookup_table
*lookup_table
=
508 table_info
->vddc_lookup_table
;
509 /* tables is already swapped, so in order to use the value from it,
510 * we need to swap it back.
511 * We are populating vddc CAC data to BapmVddc table
512 * in split and merged mode
514 for (count
= 0; count
< lookup_table
->count
; count
++) {
515 index
= phm_get_voltage_index(lookup_table
,
516 data
->vddc_voltage_table
.entries
[count
].value
);
517 table
->BapmVddcVidLoSidd
[count
] =
518 convert_to_vid(lookup_table
->entries
[index
].us_cac_low
);
519 table
->BapmVddcVidHiSidd
[count
] =
520 convert_to_vid(lookup_table
->entries
[index
].us_cac_mid
);
521 table
->BapmVddcVidHiSidd2
[count
] =
522 convert_to_vid(lookup_table
->entries
[index
].us_cac_high
);
/*
 * Populate all SMC voltage-related tables: VDDCI SMIO patterns, MVDD SMIO
 * patterns and the BapmVddc CAC tables. Always returns 0 (the helpers'
 * return values are not checked; they always return 0 today).
 */
static int vegam_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
		struct SMU75_Discrete_DpmTable *table)
{
	vegam_populate_smc_vddci_table(hwmgr, table);
	vegam_populate_smc_mvdd_table(hwmgr, table);
	vegam_populate_cac_table(hwmgr, table);

	return 0;
}
538 static int vegam_populate_ulv_level(struct pp_hwmgr
*hwmgr
,
539 struct SMU75_Discrete_Ulv
*state
)
541 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
542 struct phm_ppt_v1_information
*table_info
=
543 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
545 state
->CcPwrDynRm
= 0;
546 state
->CcPwrDynRm1
= 0;
548 state
->VddcOffset
= (uint16_t) table_info
->us_ulv_voltage_offset
;
549 state
->VddcOffsetVid
= (uint8_t)(table_info
->us_ulv_voltage_offset
*
550 VOLTAGE_VID_OFFSET_SCALE2
/ VOLTAGE_VID_OFFSET_SCALE1
);
552 state
->VddcPhase
= data
->vddc_phase_shed_control
^ 0x3;
554 CONVERT_FROM_HOST_TO_SMC_UL(state
->CcPwrDynRm
);
555 CONVERT_FROM_HOST_TO_SMC_UL(state
->CcPwrDynRm1
);
556 CONVERT_FROM_HOST_TO_SMC_US(state
->VddcOffset
);
561 static int vegam_populate_ulv_state(struct pp_hwmgr
*hwmgr
,
562 struct SMU75_Discrete_DpmTable
*table
)
564 return vegam_populate_ulv_level(hwmgr
, &table
->Ulv
);
567 static int vegam_populate_smc_link_level(struct pp_hwmgr
*hwmgr
,
568 struct SMU75_Discrete_DpmTable
*table
)
570 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
571 struct vegam_smumgr
*smu_data
=
572 (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
573 struct smu7_dpm_table
*dpm_table
= &data
->dpm_table
;
576 /* Index (dpm_table->pcie_speed_table.count)
577 * is reserved for PCIE boot level. */
578 for (i
= 0; i
<= dpm_table
->pcie_speed_table
.count
; i
++) {
579 table
->LinkLevel
[i
].PcieGenSpeed
=
580 (uint8_t)dpm_table
->pcie_speed_table
.dpm_levels
[i
].value
;
581 table
->LinkLevel
[i
].PcieLaneCount
= (uint8_t)encode_pcie_lane_width(
582 dpm_table
->pcie_speed_table
.dpm_levels
[i
].param1
);
583 table
->LinkLevel
[i
].EnabledForActivity
= 1;
584 table
->LinkLevel
[i
].SPC
= (uint8_t)(data
->pcie_spc_cap
& 0xff);
585 table
->LinkLevel
[i
].DownThreshold
= PP_HOST_TO_SMC_UL(5);
586 table
->LinkLevel
[i
].UpThreshold
= PP_HOST_TO_SMC_UL(30);
589 smu_data
->smc_state_table
.LinkLevelCount
=
590 (uint8_t)dpm_table
->pcie_speed_table
.count
;
592 /* To Do move to hwmgr */
593 data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
=
594 phm_get_dpm_level_enable_mask_value(&dpm_table
->pcie_speed_table
);
599 static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr
*hwmgr
,
600 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
,
601 uint32_t clock
, SMU_VoltageLevel
*voltage
, uint32_t *mvdd
)
605 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
607 *voltage
= *mvdd
= 0;
609 /* clock - voltage dependency table is empty table */
610 if (dep_table
->count
== 0)
613 for (i
= 0; i
< dep_table
->count
; i
++) {
614 /* find first sclk bigger than request */
615 if (dep_table
->entries
[i
].clk
>= clock
) {
616 *voltage
|= (dep_table
->entries
[i
].vddc
*
617 VOLTAGE_SCALE
) << VDDC_SHIFT
;
618 if (SMU7_VOLTAGE_CONTROL_NONE
== data
->vddci_control
)
619 *voltage
|= (data
->vbios_boot_state
.vddci_bootup_value
*
620 VOLTAGE_SCALE
) << VDDCI_SHIFT
;
621 else if (dep_table
->entries
[i
].vddci
)
622 *voltage
|= (dep_table
->entries
[i
].vddci
*
623 VOLTAGE_SCALE
) << VDDCI_SHIFT
;
625 vddci
= phm_find_closest_vddci(&(data
->vddci_voltage_table
),
626 (dep_table
->entries
[i
].vddc
-
627 (uint16_t)VDDC_VDDCI_DELTA
));
628 *voltage
|= (vddci
* VOLTAGE_SCALE
) << VDDCI_SHIFT
;
631 if (SMU7_VOLTAGE_CONTROL_NONE
== data
->mvdd_control
)
632 *mvdd
= data
->vbios_boot_state
.mvdd_bootup_value
*
634 else if (dep_table
->entries
[i
].mvdd
)
635 *mvdd
= (uint32_t) dep_table
->entries
[i
].mvdd
*
638 *voltage
|= 1 << PHASES_SHIFT
;
643 /* sclk is bigger than max sclk in the dependence table */
644 *voltage
|= (dep_table
->entries
[i
- 1].vddc
* VOLTAGE_SCALE
) << VDDC_SHIFT
;
645 vddci
= phm_find_closest_vddci(&(data
->vddci_voltage_table
),
646 (dep_table
->entries
[i
- 1].vddc
-
647 (uint16_t)VDDC_VDDCI_DELTA
));
649 if (SMU7_VOLTAGE_CONTROL_NONE
== data
->vddci_control
)
650 *voltage
|= (data
->vbios_boot_state
.vddci_bootup_value
*
651 VOLTAGE_SCALE
) << VDDCI_SHIFT
;
652 else if (dep_table
->entries
[i
- 1].vddci
)
653 *voltage
|= (dep_table
->entries
[i
- 1].vddci
*
654 VOLTAGE_SCALE
) << VDDC_SHIFT
;
656 *voltage
|= (vddci
* VOLTAGE_SCALE
) << VDDCI_SHIFT
;
658 if (SMU7_VOLTAGE_CONTROL_NONE
== data
->mvdd_control
)
659 *mvdd
= data
->vbios_boot_state
.mvdd_bootup_value
* VOLTAGE_SCALE
;
660 else if (dep_table
->entries
[i
].mvdd
)
661 *mvdd
= (uint32_t) dep_table
->entries
[i
- 1].mvdd
* VOLTAGE_SCALE
;
666 static void vegam_get_sclk_range_table(struct pp_hwmgr
*hwmgr
,
667 SMU75_Discrete_DpmTable
*table
)
669 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
672 struct pp_atom_ctrl_sclk_range_table range_table_from_vbios
= { { {0} } };
674 ref_clk
= amdgpu_asic_get_xclk((struct amdgpu_device
*)hwmgr
->adev
);
676 if (0 == atomctrl_get_smc_sclk_range_table(hwmgr
, &range_table_from_vbios
)) {
677 for (i
= 0; i
< NUM_SCLK_RANGE
; i
++) {
678 table
->SclkFcwRangeTable
[i
].vco_setting
=
679 range_table_from_vbios
.entry
[i
].ucVco_setting
;
680 table
->SclkFcwRangeTable
[i
].postdiv
=
681 range_table_from_vbios
.entry
[i
].ucPostdiv
;
682 table
->SclkFcwRangeTable
[i
].fcw_pcc
=
683 range_table_from_vbios
.entry
[i
].usFcw_pcc
;
685 table
->SclkFcwRangeTable
[i
].fcw_trans_upper
=
686 range_table_from_vbios
.entry
[i
].usFcw_trans_upper
;
687 table
->SclkFcwRangeTable
[i
].fcw_trans_lower
=
688 range_table_from_vbios
.entry
[i
].usRcw_trans_lower
;
690 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_pcc
);
691 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_trans_upper
);
692 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_trans_lower
);
697 for (i
= 0; i
< NUM_SCLK_RANGE
; i
++) {
698 smu_data
->range_table
[i
].trans_lower_frequency
=
699 (ref_clk
* Range_Table
[i
].fcw_trans_lower
) >> Range_Table
[i
].postdiv
;
700 smu_data
->range_table
[i
].trans_upper_frequency
=
701 (ref_clk
* Range_Table
[i
].fcw_trans_upper
) >> Range_Table
[i
].postdiv
;
703 table
->SclkFcwRangeTable
[i
].vco_setting
= Range_Table
[i
].vco_setting
;
704 table
->SclkFcwRangeTable
[i
].postdiv
= Range_Table
[i
].postdiv
;
705 table
->SclkFcwRangeTable
[i
].fcw_pcc
= Range_Table
[i
].fcw_pcc
;
707 table
->SclkFcwRangeTable
[i
].fcw_trans_upper
= Range_Table
[i
].fcw_trans_upper
;
708 table
->SclkFcwRangeTable
[i
].fcw_trans_lower
= Range_Table
[i
].fcw_trans_lower
;
710 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_pcc
);
711 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_trans_upper
);
712 CONVERT_FROM_HOST_TO_SMC_US(table
->SclkFcwRangeTable
[i
].fcw_trans_lower
);
716 static int vegam_calculate_sclk_params(struct pp_hwmgr
*hwmgr
,
717 uint32_t clock
, SMU_SclkSetting
*sclk_setting
)
719 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
720 const SMU75_Discrete_DpmTable
*table
= &(smu_data
->smc_state_table
);
721 struct pp_atomctrl_clock_dividers_ai dividers
;
723 uint32_t pcc_target_percent
, pcc_target_freq
, ss_target_percent
, ss_target_freq
;
728 sclk_setting
->SclkFrequency
= clock
;
729 /* get the engine clock dividers for this clock value */
730 result
= atomctrl_get_engine_pll_dividers_ai(hwmgr
, clock
, ÷rs
);
732 sclk_setting
->Fcw_int
= dividers
.usSclk_fcw_int
;
733 sclk_setting
->Fcw_frac
= dividers
.usSclk_fcw_frac
;
734 sclk_setting
->Pcc_fcw_int
= dividers
.usPcc_fcw_int
;
735 sclk_setting
->PllRange
= dividers
.ucSclkPllRange
;
736 sclk_setting
->Sclk_slew_rate
= 0x400;
737 sclk_setting
->Pcc_up_slew_rate
= dividers
.usPcc_fcw_slew_frac
;
738 sclk_setting
->Pcc_down_slew_rate
= 0xffff;
739 sclk_setting
->SSc_En
= dividers
.ucSscEnable
;
740 sclk_setting
->Fcw1_int
= dividers
.usSsc_fcw1_int
;
741 sclk_setting
->Fcw1_frac
= dividers
.usSsc_fcw1_frac
;
742 sclk_setting
->Sclk_ss_slew_rate
= dividers
.usSsc_fcw_slew_frac
;
746 ref_clock
= amdgpu_asic_get_xclk((struct amdgpu_device
*)hwmgr
->adev
);
748 for (i
= 0; i
< NUM_SCLK_RANGE
; i
++) {
749 if (clock
> smu_data
->range_table
[i
].trans_lower_frequency
750 && clock
<= smu_data
->range_table
[i
].trans_upper_frequency
) {
751 sclk_setting
->PllRange
= i
;
756 sclk_setting
->Fcw_int
= (uint16_t)
757 ((clock
<< table
->SclkFcwRangeTable
[sclk_setting
->PllRange
].postdiv
) /
759 temp
= clock
<< table
->SclkFcwRangeTable
[sclk_setting
->PllRange
].postdiv
;
761 do_div(temp
, ref_clock
);
762 sclk_setting
->Fcw_frac
= temp
& 0xffff;
764 pcc_target_percent
= 10; /* Hardcode 10% for now. */
765 pcc_target_freq
= clock
- (clock
* pcc_target_percent
/ 100);
766 sclk_setting
->Pcc_fcw_int
= (uint16_t)
767 ((pcc_target_freq
<< table
->SclkFcwRangeTable
[sclk_setting
->PllRange
].postdiv
) /
770 ss_target_percent
= 2; /* Hardcode 2% for now. */
771 sclk_setting
->SSc_En
= 0;
772 if (ss_target_percent
) {
773 sclk_setting
->SSc_En
= 1;
774 ss_target_freq
= clock
- (clock
* ss_target_percent
/ 100);
775 sclk_setting
->Fcw1_int
= (uint16_t)
776 ((ss_target_freq
<< table
->SclkFcwRangeTable
[sclk_setting
->PllRange
].postdiv
) /
778 temp
= ss_target_freq
<< table
->SclkFcwRangeTable
[sclk_setting
->PllRange
].postdiv
;
780 do_div(temp
, ref_clock
);
781 sclk_setting
->Fcw1_frac
= temp
& 0xffff;
787 static uint8_t vegam_get_sleep_divider_id_from_clock(uint32_t clock
,
792 uint32_t min
= max(clock_insr
, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK
);
794 PP_ASSERT_WITH_CODE((clock
>= min
),
795 "Engine clock can't satisfy stutter requirement!",
797 for (i
= 31; ; i
--) {
798 temp
= clock
/ (i
+ 1);
800 if (temp
>= min
|| i
== 0)
806 static int vegam_populate_single_graphic_level(struct pp_hwmgr
*hwmgr
,
807 uint32_t clock
, struct SMU75_Discrete_GraphicsLevel
*level
)
810 /* PP_Clocks minClocks; */
812 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
813 struct phm_ppt_v1_information
*table_info
=
814 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
815 SMU_SclkSetting curr_sclk_setting
= { 0 };
817 result
= vegam_calculate_sclk_params(hwmgr
, clock
, &curr_sclk_setting
);
819 /* populate graphics levels */
820 result
= vegam_get_dependency_volt_by_clk(hwmgr
,
821 table_info
->vdd_dep_on_sclk
, clock
,
822 &level
->MinVoltage
, &mvdd
);
824 PP_ASSERT_WITH_CODE((0 == result
),
825 "can not find VDDC voltage value for "
826 "VDDC engine clock dependency table",
828 level
->ActivityLevel
= (uint16_t)(SclkDPMTuning_VEGAM
>> DPMTuning_Activity_Shift
);
830 level
->CcPwrDynRm
= 0;
831 level
->CcPwrDynRm1
= 0;
832 level
->EnabledForActivity
= 0;
833 level
->EnabledForThrottle
= 1;
834 level
->VoltageDownHyst
= 0;
835 level
->PowerThrottle
= 0;
836 data
->display_timing
.min_clock_in_sr
= hwmgr
->display_config
->min_core_set_clock_in_sr
;
838 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_SclkDeepSleep
))
839 level
->DeepSleepDivId
= vegam_get_sleep_divider_id_from_clock(clock
,
840 hwmgr
->display_config
->min_core_set_clock_in_sr
);
842 level
->SclkSetting
= curr_sclk_setting
;
844 CONVERT_FROM_HOST_TO_SMC_UL(level
->MinVoltage
);
845 CONVERT_FROM_HOST_TO_SMC_UL(level
->CcPwrDynRm
);
846 CONVERT_FROM_HOST_TO_SMC_UL(level
->CcPwrDynRm1
);
847 CONVERT_FROM_HOST_TO_SMC_US(level
->ActivityLevel
);
848 CONVERT_FROM_HOST_TO_SMC_UL(level
->SclkSetting
.SclkFrequency
);
849 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Fcw_int
);
850 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Fcw_frac
);
851 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Pcc_fcw_int
);
852 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Sclk_slew_rate
);
853 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Pcc_up_slew_rate
);
854 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Pcc_down_slew_rate
);
855 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Fcw1_int
);
856 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Fcw1_frac
);
857 CONVERT_FROM_HOST_TO_SMC_US(level
->SclkSetting
.Sclk_ss_slew_rate
);
861 static int vegam_populate_all_graphic_levels(struct pp_hwmgr
*hwmgr
)
863 struct smu7_hwmgr
*hw_data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
864 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
865 struct smu7_dpm_table
*dpm_table
= &hw_data
->dpm_table
;
866 struct phm_ppt_v1_information
*table_info
=
867 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
868 struct phm_ppt_v1_pcie_table
*pcie_table
= table_info
->pcie_table
;
869 uint8_t pcie_entry_cnt
= (uint8_t) hw_data
->dpm_table
.pcie_speed_table
.count
;
871 uint32_t array
= smu_data
->smu7_data
.dpm_table_start
+
872 offsetof(SMU75_Discrete_DpmTable
, GraphicsLevel
);
873 uint32_t array_size
= sizeof(struct SMU75_Discrete_GraphicsLevel
) *
874 SMU75_MAX_LEVELS_GRAPHICS
;
875 struct SMU75_Discrete_GraphicsLevel
*levels
=
876 smu_data
->smc_state_table
.GraphicsLevel
;
877 uint32_t i
, max_entry
;
878 uint8_t hightest_pcie_level_enabled
= 0,
879 lowest_pcie_level_enabled
= 0,
880 mid_pcie_level_enabled
= 0,
883 vegam_get_sclk_range_table(hwmgr
, &(smu_data
->smc_state_table
));
885 for (i
= 0; i
< dpm_table
->sclk_table
.count
; i
++) {
887 result
= vegam_populate_single_graphic_level(hwmgr
,
888 dpm_table
->sclk_table
.dpm_levels
[i
].value
,
889 &(smu_data
->smc_state_table
.GraphicsLevel
[i
]));
893 levels
[i
].UpHyst
= (uint8_t)
894 (SclkDPMTuning_VEGAM
>> DPMTuning_Uphyst_Shift
);
895 levels
[i
].DownHyst
= (uint8_t)
896 (SclkDPMTuning_VEGAM
>> DPMTuning_Downhyst_Shift
);
897 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
899 levels
[i
].DeepSleepDivId
= 0;
901 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
902 PHM_PlatformCaps_SPLLShutdownSupport
))
903 smu_data
->smc_state_table
.GraphicsLevel
[0].SclkSetting
.SSc_En
= 0;
905 smu_data
->smc_state_table
.GraphicsDpmLevelCount
=
906 (uint8_t)dpm_table
->sclk_table
.count
;
907 hw_data
->dpm_level_enable_mask
.sclk_dpm_enable_mask
=
908 phm_get_dpm_level_enable_mask_value(&dpm_table
->sclk_table
);
910 for (i
= 0; i
< dpm_table
->sclk_table
.count
; i
++)
911 levels
[i
].EnabledForActivity
=
912 (hw_data
->dpm_level_enable_mask
.sclk_dpm_enable_mask
>> i
) & 0x1;
914 if (pcie_table
!= NULL
) {
915 PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt
),
916 "There must be 1 or more PCIE levels defined in PPTable.",
918 max_entry
= pcie_entry_cnt
- 1;
919 for (i
= 0; i
< dpm_table
->sclk_table
.count
; i
++)
920 levels
[i
].pcieDpmLevel
=
921 (uint8_t) ((i
< max_entry
) ? i
: max_entry
);
923 while (hw_data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
&&
924 ((hw_data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
&
925 (1 << (hightest_pcie_level_enabled
+ 1))) != 0))
926 hightest_pcie_level_enabled
++;
928 while (hw_data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
&&
929 ((hw_data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
&
930 (1 << lowest_pcie_level_enabled
)) == 0))
931 lowest_pcie_level_enabled
++;
933 while ((count
< hightest_pcie_level_enabled
) &&
934 ((hw_data
->dpm_level_enable_mask
.pcie_dpm_enable_mask
&
935 (1 << (lowest_pcie_level_enabled
+ 1 + count
))) == 0))
938 mid_pcie_level_enabled
= (lowest_pcie_level_enabled
+ 1 + count
) <
939 hightest_pcie_level_enabled
?
940 (lowest_pcie_level_enabled
+ 1 + count
) :
941 hightest_pcie_level_enabled
;
943 /* set pcieDpmLevel to hightest_pcie_level_enabled */
944 for (i
= 2; i
< dpm_table
->sclk_table
.count
; i
++)
945 levels
[i
].pcieDpmLevel
= hightest_pcie_level_enabled
;
947 /* set pcieDpmLevel to lowest_pcie_level_enabled */
948 levels
[0].pcieDpmLevel
= lowest_pcie_level_enabled
;
950 /* set pcieDpmLevel to mid_pcie_level_enabled */
951 levels
[1].pcieDpmLevel
= mid_pcie_level_enabled
;
953 /* level count will send to smc once at init smc table and never change */
954 result
= smu7_copy_bytes_to_smc(hwmgr
, array
, (uint8_t *)levels
,
955 (uint32_t)array_size
, SMC_RAM_END
);
/*
 * Query the VBIOS (via atomctrl) for the AI memory-PLL dividers that realize
 * the requested memory clock, and record them in @mem_level.
 *
 * Returns 0 on success, -EINVAL if the PLL parameters cannot be retrieved.
 */
static int vegam_calculate_mclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level)
{
	struct pp_atomctrl_memory_clock_param_ai mpll_param;

	/* NOTE(review): the argument list and failure action of this assert were
	 * reconstructed from the surrounding pattern — confirm against upstream. */
	PP_ASSERT_WITH_CODE(!atomctrl_get_memory_pll_dividers_ai(hwmgr,
			clock, &mpll_param),
			"Failed to retrieve memory pll parameter.",
			return -EINVAL);

	/* Narrowing casts are intentional: the SMC fields are fixed-width. */
	mem_level->MclkFrequency = (uint32_t)mpll_param.ulClock;
	mem_level->Fcw_int = (uint16_t)mpll_param.ulMclk_fcw_int;
	mem_level->Fcw_frac = (uint16_t)mpll_param.ulMclk_fcw_frac;
	mem_level->Postdiv = (uint8_t)mpll_param.ulPostDiv;

	return 0;
}
/*
 * Fill one SMU75 memory DPM level for the given memory @clock:
 * look up the minimum VDDC/MVDD from the pptable dependency table, compute
 * the MPLL dividers, set static tuning fields, and finally byte-swap the
 * multi-byte fields into SMC (big-endian) order.
 */
static int vegam_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	/* Stutter mode is only allowed at or below this MCLK (kHz-scale units
	 * per the comparison against dpm level values). */
	uint32_t mclk_stutter_mode_threshold = 60000;

	if (table_info->vdd_dep_on_mclk) {
		result = vegam_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk, clock,
				&mem_level->MinVoltage, &mem_level->MinMvdd);
		PP_ASSERT_WITH_CODE(!result,
				"can not find MinVddc voltage value from memory "
				"VDDC voltage dependency table", return result);
	}

	result = vegam_calculate_mclk_params(hwmgr, clock, mem_level);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to calculate mclk params.",
			return -EINVAL);

	mem_level->EnabledForThrottle = 1;
	mem_level->EnabledForActivity = 0;
	mem_level->VoltageDownHyst = 0;
	/* Activity threshold comes packed in MemoryDPMTuning_VEGAM. */
	mem_level->ActivityLevel = (uint16_t)
			(MemoryDPMTuning_VEGAM >> DPMTuning_Activity_Shift);
	mem_level->StutterEnable = false;
	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
	data->display_timing.vrefresh = hwmgr->display_config->vrefresh;

	/* Enable stutter only when below the threshold AND the display engine
	 * already has stutter enabled in DPG_PIPE_STUTTER_CONTROL. */
	if (mclk_stutter_mode_threshold &&
	    (clock <= mclk_stutter_mode_threshold) &&
	    (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
			    STUTTER_ENABLE) & 0x1))
		mem_level->StutterEnable = true;

	/* Convert to SMC endianness last, after all host-order math is done. */
	if (!result) {
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_int);
		CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_frac);
		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
	}

	return result;
}
/*
 * Populate every MCLK DPM level from the hwmgr dpm_table into the SMU75
 * state table, derive the enable mask, mark the top level with the HIGH
 * display watermark, and upload the whole MemoryLevel array to SMC RAM.
 */
static int vegam_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
	int result;
	/* populate MCLK dpm table to SMU7 */
	uint32_t array = smu_data->smu7_data.dpm_table_start +
			offsetof(SMU75_Discrete_DpmTable, MemoryLevel);
	/* Always copy the full (max-size) array, not just the used levels. */
	uint32_t array_size = sizeof(SMU75_Discrete_MemoryLevel) *
			SMU75_MAX_LEVELS_MEMORY;
	struct SMU75_Discrete_MemoryLevel *levels =
			smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
				"can not populate memory level as memory clock is zero",
				return -EINVAL);
		result = vegam_populate_single_memory_level(hwmgr,
				dpm_table->mclk_table.dpm_levels[i].value,
				&levels[i]);
		if (result)
			return result;

		/* Hysteresis values are packed in MemoryDPMTuning_VEGAM. */
		levels[i].UpHyst = (uint8_t)
				(MemoryDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift);
		levels[i].DownHyst = (uint8_t)
				(MemoryDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift);
	}

	smu_data->smc_state_table.MemoryDpmLevelCount =
			(uint8_t)dpm_table->mclk_table.count;
	hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);

	for (i = 0; i < dpm_table->mclk_table.count; i++)
		levels[i].EnabledForActivity =
				(hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask >> i) & 0x1;

	/* Highest level carries the HIGH display watermark. */
	levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	/* level count will send to smc once at init smc table and never change */
	result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
			(uint32_t)array_size, SMC_RAM_END);

	return result;
}
/*
 * Find the MVDD SMIO voltage pattern for the lowest dependency-table entry
 * whose clock is >= @mclk.
 *
 * Returns 0 on success; -EINVAL when MVDD is not controlled or @mclk is
 * above every entry in the dependency table.
 */
static int vegam_populate_mvdd_value(struct pp_hwmgr *hwmgr,
		uint32_t mclk, SMIO_Pattern *smio_pat)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i = 0;

	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
		/* find mvdd value which clock is more than request */
		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
				/* NOTE(review): indexes mvdd_voltage_table with the
				 * vdd_dep_on_mclk index — assumes both tables are the
				 * same length; standard smu7 pattern. */
				smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
				break;
			}
		}
		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
				"MVDD Voltage is outside the supported range.",
				return -EINVAL);
	} else
		return -EINVAL;

	return 0;
}
/*
 * Build the ACPI (lowest-power) SCLK and MCLK levels from the VBIOS boot
 * values: resolve minimum voltages from the dependency tables, compute the
 * SCLK PLL settings, fill static fields, and byte-swap everything into SMC
 * order.  Errors from the VDDC/VDDCI lookups are reported but deliberately
 * not fatal (the asserts have empty failure actions).
 */
static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
		SMU75_Discrete_DpmTable *table)
{
	int result = 0;
	uint32_t sclk_frequency;
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	SMIO_Pattern vol_level;
	uint32_t mvdd;

	/* Running on AC power: clear the DC flag. */
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* Get MinVoltage and Frequency from DPM0,
	 * already converted to SMC_UL */
	sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
	result = vegam_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_sclk,
			sclk_frequency,
			&table->ACPILevel.MinVoltage, &mvdd);
	PP_ASSERT_WITH_CODE(!result,
			"Cannot find ACPI VDDC voltage value "
			"in Clock Dependency Table",
			);

	result = vegam_calculate_sclk_params(hwmgr, sclk_frequency,
			&(table->ACPILevel.SclkSetting));
	PP_ASSERT_WITH_CODE(!result,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	table->ACPILevel.DeepSleepDivId = 0;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);

	/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
	table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
	result = vegam_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_mclk,
			table->MemoryACPILevel.MclkFrequency,
			&table->MemoryACPILevel.MinVoltage, &mvdd);
	PP_ASSERT_WITH_CODE((0 == result),
			"Cannot find ACPI VDDCI voltage value "
			"in Clock Dependency Table",
			);

	/* MVDD: use the SMIO pattern when available, else 0. */
	if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level))
		table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	table->MemoryACPILevel.StutterEnable = false;

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);

	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);

	return result;
}
/*
 * Populate the VCE DPM levels from the multimedia dependency table: for each
 * level pack VDDC/VDDCI/phase bits into MinVoltage, query the VBIOS for the
 * ECLK divider, and byte-swap the results for the SMC.
 */
static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
		SMU75_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t vddci;

	table->VceLevelCount = (uint8_t)(mm_table->count);
	table->VceBootLevel = 0;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
		table->VceLevel[count].MinVoltage = 0;
		table->VceLevel[count].MinVoltage |=
				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;

		/* VDDCI is derived from VDDC minus a fixed delta, rounded to a
		 * supported step for GPIO control; boot value otherwise. */
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
					mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
		else
			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;

		table->VceLevel[count].MinVoltage |=
				(vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

		/*retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->VceLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for VCE engine clock",
				return result);

		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
	}

	return result;
}
/*
 * Ask the VBIOS to program DRAM timings for the given engine/memory clock
 * pair, then read back the resulting MC arbiter registers and store them
 * (SMC byte order) into one arb-table entry.
 */
static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
		int32_t eng_clock, int32_t mem_clock,
		SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	uint32_t dram_timing;
	uint32_t dram_timing2;
	uint32_t burst_time;
	uint32_t rfsh_rate;
	uint32_t misc3;
	int result;

	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
			eng_clock, mem_clock);
	PP_ASSERT_WITH_CODE(result == 0,
			"Error calling VBIOS to set DRAM_TIMING.",
			return result);

	/* The VBIOS call above leaves the computed timings in these MC
	 * arbiter registers; capture them for the SMC table. */
	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burst_time = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
	rfsh_rate = cgs_read_register(hwmgr->device, mmMC_ARB_RFSH_RATE);
	misc3 = cgs_read_register(hwmgr->device, mmMC_ARB_MISC3);

	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
	arb_regs->McArbBurstTime   = PP_HOST_TO_SMC_UL(burst_time);
	arb_regs->McArbRfshRate = PP_HOST_TO_SMC_UL(rfsh_rate);
	arb_regs->McArbMisc3 = PP_HOST_TO_SMC_UL(misc3);

	return 0;
}
/*
 * Build the full SCLK x MCLK DRAM-timing arbiter table (one entry per clock
 * pair) and upload it to SMC RAM at arb_table_start.
 */
static int vegam_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
	struct SMU75_Discrete_MCArbDramTimingTable arb_regs;
	uint32_t i, j;
	int result = 0;

	memset(&arb_regs, 0, sizeof(SMU75_Discrete_MCArbDramTimingTable));

	for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
		for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
			result = vegam_populate_memory_timing_parameters(hwmgr,
					hw_data->dpm_table.sclk_table.dpm_levels[i].value,
					hw_data->dpm_table.mclk_table.dpm_levels[j].value,
					&arb_regs.entries[i][j]);
			if (result)
				return result;
		}
	}

	result = smu7_copy_bytes_to_smc(
			hwmgr,
			smu_data->smu7_data.arb_table_start,
			(uint8_t *)&arb_regs,
			sizeof(SMU75_Discrete_MCArbDramTimingTable),
			SMC_RAM_END);

	return result;
}
/*
 * Populate the UVD DPM levels from the multimedia dependency table: for each
 * level pack VDDC/VDDCI/phase bits into MinVoltage, query the VBIOS for the
 * VCLK and DCLK dividers, and byte-swap the results for the SMC.
 */
static int vegam_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
		struct SMU75_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t vddci;

	table->UvdLevelCount = (uint8_t)(mm_table->count);
	table->UvdBootLevel = 0;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].MinVoltage = 0;
		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
		table->UvdLevel[count].MinVoltage |=
				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;

		/* Same VDDCI derivation as the VCE path: VDDC minus a fixed
		 * delta (rounded for GPIO control), boot value otherwise. */
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
					mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
		else
			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;

		table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

		/* retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].VclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Vclk clock", return result);

		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].DclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Dclk clock", return result);

		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
	}

	return result;
}
/*
 * Resolve the SCLK/MCLK boot DPM levels from the VBIOS boot clocks and fill
 * the boot voltages (scaled by VOLTAGE_SCALE, then byte-swapped for SMC).
 */
static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
		struct SMU75_Discrete_DpmTable *table)
{
	int result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	/* find boot level from dpm table */
	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(table->GraphicsBootLevel));
	if (result)
		return result;

	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
			data->vbios_boot_state.mclk_bootup_value,
			(uint32_t *)&(table->MemoryBootLevel));
	if (result)
		return result;

	table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
			VOLTAGE_SCALE;
	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
			VOLTAGE_SCALE;
	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
			VOLTAGE_SCALE;

	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);

	return 0;
}
/*
 * Record in the SMC state table the first SCLK/MCLK dependency-table level
 * whose clock is at or above the VBIOS boot clock (the initial DPM level).
 */
static int vegam_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint8_t count, level;

	count = (uint8_t)(table_info->vdd_dep_on_sclk->count);

	for (level = 0; level < count; level++) {
		if (table_info->vdd_dep_on_sclk->entries[level].clk >=
				hw_data->vbios_boot_state.sclk_bootup_value) {
			smu_data->smc_state_table.GraphicsBootLevel = level;
			break;
		}
	}

	count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
	for (level = 0; level < count; level++) {
		if (table_info->vdd_dep_on_mclk->entries[level].clk >=
				hw_data->vbios_boot_state.mclk_bootup_value) {
			smu_data->smc_state_table.MemoryBootLevel = level;
			break;
		}
	}

	return 0;
}
/*
 * Convert a fan-gain setting given in percent into the 4096-based
 * fixed-point encoding the SMC expects (i.e. value * 4096 / 100).
 */
static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
{
	uint32_t scaled = (uint32_t)raw_setting * 4096 / 100;

	return (uint16_t)scaled;
}
/*
 * Fill the BAPM (power-containment) parameters in the SMC DPM table: TDP
 * targets, temperature limits, scaled fan gains, and the DTE thermal
 * coupling matrices (BAPMTI_R / BAPMTI_RC) copied from the per-ASIC
 * power-tune defaults.
 */
static int vegam_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
	const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
	struct pp_advance_fan_control_parameters *fan_table =
			&hwmgr->thermal_controller.advanceFanControlParameters;
	int i, j, k;
	const uint16_t *pdef1;
	const uint16_t *pdef2;

	/* TDP values are carried in 1/128 W units. */
	table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
	table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));

	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
			"Target Operating Temp is out of Range!",
			);

	/* Temperature limits are in 1/256 degC units. */
	table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTargetOperatingTemp * 256);
	table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitHotspot * 256);
	table->FanGainEdge = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainEdge));
	table->FanGainHotspot = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainHotspot));

	pdef1 = defaults->BAPMTI_R;
	pdef2 = defaults->BAPMTI_RC;

	/* Defaults are stored flat in iteration-major order; walk both
	 * pointers in lockstep with the 3-D destination. */
	for (i = 0; i < SMU75_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU75_DTE_SOURCES; j++) {
			for (k = 0; k < SMU75_DTE_SINKS; k++) {
				table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
				table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
				pdef1++;
				pdef2++;
			}
		}
	}

	return 0;
}
/*
 * Populate the clock-stretcher (CKS) data: read the ASIC ring-oscillator
 * fuse, derive per-SCLK-level voltage offsets from the fitted with/without
 * CKS voltage curves, select the LDO reference, validate the pptable
 * stretch amount, and clear bit 0 of PWR_CKS_CNTL.
 *
 * The large integer constants are curve-fit coefficients for this ASIC;
 * do not "simplify" the arithmetic — evaluation order and the integer
 * divisions are part of the fitted formula.
 */
static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
	struct vegam_smumgr *smu_data =
			(struct vegam_smumgr *)(hwmgr->smu_backend);

	uint8_t i, stretch_amount, volt_offset = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;
	uint32_t mask = (1 << ((STRAP_ASIC_RO_MSB - STRAP_ASIC_RO_LSB) + 1)) - 1;

	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;

	/* NOTE(review): trailing arguments of this call and the min/max bounds
	 * below were reconstructed — confirm against upstream. */
	atomctrl_read_efuse(hwmgr, STRAP_ASIC_RO_LSB, STRAP_ASIC_RO_MSB,
			mask, &efuse);

	min = 1200;
	max = 2500;

	/* Map the 8-bit fuse reading linearly onto [min, max]. */
	ro = efuse * (max - min) / 255 + min;

	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
	for (i = 0; i < sclk_table->count; i++) {
		smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
				sclk_table->entries[i].cks_enable << i;
		volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) *
				136418 - (ro - 70) * 1000000) /
				(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
		volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 *
				3232 - (ro - 65) * 1000000) /
				(2522480 - sclk_table->entries[i].clk/100 * 115764/100));

		/* Offset in 6.25 mV steps, rounded up ("+ 624" before /625). */
		if (volt_without_cks >= volt_with_cks)
			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
					sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);

		smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
	}

	smu_data->smc_state_table.LdoRefSel =
			(table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ?
			table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5;
	/* Populate CKS Lookup Table */
	if (!(stretch_amount == 1 || stretch_amount == 2 ||
	      stretch_amount == 5 || stretch_amount == 3 ||
	      stretch_amount == 4)) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
		PP_ASSERT_WITH_CODE(false,
				"Stretch Amount in PPTable not supported\n",
				return -EINVAL);
	}

	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
	value &= 0xFFFFFFFE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);

	return 0;
}
/*
 * Report whether hardware AVFS is fused on: bit 0 of SMU efuse dword 49.
 */
static bool vegam_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
	uint32_t efuse;

	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (49 * 4));
	efuse &= 0x00000001;

	return efuse != 0;
}
/*
 * Fetch the fused AVFS curve parameters from atomctrl and program them:
 * the CKS-on/off VDROOP and fuse tables plus MaxVoltage go into the SMC DPM
 * table (byte-swapped), while the mean/sigma and per-level SCLK offset
 * tables are uploaded to the addresses advertised in the SMC firmware
 * header.  Finally the vdroop-override and CKS-off-voltage policies are
 * cached in the hwmgr backend.  No-op when AVFS is unsupported.
 */
static int vegam_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
	SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	int result = 0;
	struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
	AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
	AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
	uint32_t tmp, i;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;

	if (!hwmgr->avfs_supported)
		return 0;

	result = atomctrl_get_avfs_information(hwmgr, &avfs_params);

	if (0 == result) {
		table->BTCGB_VDROOP_TABLE[0].a0 =
				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
		table->BTCGB_VDROOP_TABLE[0].a1 =
				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
		table->BTCGB_VDROOP_TABLE[0].a2 =
				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
		table->BTCGB_VDROOP_TABLE[1].a0 =
				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
		table->BTCGB_VDROOP_TABLE[1].a1 =
				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
		table->BTCGB_VDROOP_TABLE[1].a2 =
				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
		table->AVFSGB_FUSE_TABLE[0].m1 =
				PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
		table->AVFSGB_FUSE_TABLE[0].m2 =
				PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
		table->AVFSGB_FUSE_TABLE[0].b =
				PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
		table->AVFSGB_FUSE_TABLE[0].m1_shift = 24;
		table->AVFSGB_FUSE_TABLE[0].m2_shift = 12;
		table->AVFSGB_FUSE_TABLE[1].m1 =
				PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
		table->AVFSGB_FUSE_TABLE[1].m2 =
				PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
		table->AVFSGB_FUSE_TABLE[1].b =
				PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
		table->AVFSGB_FUSE_TABLE[1].m1_shift = 24;
		table->AVFSGB_FUSE_TABLE[1].m2_shift = 12;
		table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
		AVFS_meanNsigma.Aconstant[0] =
				PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
		AVFS_meanNsigma.Aconstant[1] =
				PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
		AVFS_meanNsigma.Aconstant[2] =
				PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
		AVFS_meanNsigma.DC_tol_sigma =
				PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
		AVFS_meanNsigma.Platform_mean =
				PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
		AVFS_meanNsigma.PSM_Age_CompFactor =
				PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
		AVFS_meanNsigma.Platform_sigma =
				PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);

		for (i = 0; i < sclk_table->count; i++) {
			/* cks_voffset is in 6.25 mV units -> 0.0625 mV steps. */
			AVFS_meanNsigma.Static_Voltage_Offset[i] =
					(uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
			AVFS_SclkOffset.Sclk_Offset[i] =
					PP_HOST_TO_SMC_US((uint16_t)
					(sclk_table->entries[i].sclk_offset) / 100);
		}

		/* The firmware header tells us where to place each AVFS blob.
		 * NOTE(review): the destination-address plumbing (tmp) was
		 * reconstructed — confirm against upstream. */
		result = smu7_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU75_Firmware_Header, AvfsMeanNSigma),
				&tmp, SMC_RAM_END);
		smu7_copy_bytes_to_smc(hwmgr,
				tmp,
				(uint8_t *)&AVFS_meanNsigma,
				sizeof(AVFS_meanNsigma_t),
				SMC_RAM_END);

		result = smu7_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU75_Firmware_Header, AvfsSclkOffsetTable),
				&tmp, SMC_RAM_END);
		smu7_copy_bytes_to_smc(hwmgr,
				tmp,
				(uint8_t *)&AVFS_SclkOffset,
				sizeof(AVFS_Sclk_Offset_t),
				SMC_RAM_END);

		data->avfs_vdroop_override_setting =
				(avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
				(avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
				(avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
				(avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
		data->apply_avfs_cks_off_voltage =
				(avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
	}

	return result;
}
1665 static int vegam_populate_vr_config(struct pp_hwmgr
*hwmgr
,
1666 struct SMU75_Discrete_DpmTable
*table
)
1668 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1669 struct vegam_smumgr
*smu_data
=
1670 (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1673 config
= VR_MERGED_WITH_VDDC
;
1674 table
->VRConfig
|= (config
<< VRCONF_VDDGFX_SHIFT
);
1676 /* Set Vddc Voltage Controller */
1677 if (SMU7_VOLTAGE_CONTROL_BY_SVID2
== data
->voltage_control
) {
1678 config
= VR_SVI2_PLANE_1
;
1679 table
->VRConfig
|= config
;
1681 PP_ASSERT_WITH_CODE(false,
1682 "VDDC should be on SVI2 control in merged mode!",
1685 /* Set Vddci Voltage Controller */
1686 if (SMU7_VOLTAGE_CONTROL_BY_SVID2
== data
->vddci_control
) {
1687 config
= VR_SVI2_PLANE_2
; /* only in merged mode */
1688 table
->VRConfig
|= (config
<< VRCONF_VDDCI_SHIFT
);
1689 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO
== data
->vddci_control
) {
1690 config
= VR_SMIO_PATTERN_1
;
1691 table
->VRConfig
|= (config
<< VRCONF_VDDCI_SHIFT
);
1693 config
= VR_STATIC_VOLTAGE
;
1694 table
->VRConfig
|= (config
<< VRCONF_VDDCI_SHIFT
);
1696 /* Set Mvdd Voltage Controller */
1697 if (SMU7_VOLTAGE_CONTROL_BY_SVID2
== data
->mvdd_control
) {
1698 if (config
!= VR_SVI2_PLANE_2
) {
1699 config
= VR_SVI2_PLANE_2
;
1700 table
->VRConfig
|= (config
<< VRCONF_MVDD_SHIFT
);
1701 cgs_write_ind_register(hwmgr
->device
,
1703 smu_data
->smu7_data
.soft_regs_start
+
1704 offsetof(SMU75_SoftRegisters
, AllowMvddSwitch
),
1707 PP_ASSERT_WITH_CODE(false,
1708 "SVI2 Plane 2 is already taken, set MVDD as Static",);
1709 config
= VR_STATIC_VOLTAGE
;
1710 table
->VRConfig
= (config
<< VRCONF_MVDD_SHIFT
);
1712 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO
== data
->mvdd_control
) {
1713 config
= VR_SMIO_PATTERN_2
;
1714 table
->VRConfig
= (config
<< VRCONF_MVDD_SHIFT
);
1715 cgs_write_ind_register(hwmgr
->device
,
1717 smu_data
->smu7_data
.soft_regs_start
+
1718 offsetof(SMU75_SoftRegisters
, AllowMvddSwitch
),
1721 config
= VR_STATIC_VOLTAGE
;
1722 table
->VRConfig
|= (config
<< VRCONF_MVDD_SHIFT
);
/*
 * Copy the SVI load-line settings from the per-ASIC power-tune defaults
 * into the power-tune table; trim and offset are fixed values.
 */
static int vegam_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
	const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;

	smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
	smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}
/*
 * Program the TDC (thermal design current) package limit from the pptable
 * (1/128 A units) plus the throttle-release percentage and moving-average
 * window taken from the power-tune defaults.
 */
static int vegam_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;

	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
	smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;

	return 0;
}
/*
 * Read PmFuses dword 8 (TdcWaterfallCtl..Reserved) from SMC RAM at
 * @fuse_table_offset, then repopulate it: TdcWaterfallCtl from the defaults
 * and the LPML temperature min/max plus reserved byte unpacked from the
 * value read back.
 */
static int vegam_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
	const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (smu7_read_smc_sram_dword(hwmgr,
			fuse_table_offset +
			offsetof(SMU75_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
				return -EINVAL);
	else {
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
		/* Byte layout of the dword: [31:24 unused][23:16 min][15:8 max][7:0 rsvd] */
		smu_data->power_tune_table.LPMLTemperatureMin =
				(uint8_t)((temp >> 16) & 0xff);
		smu_data->power_tune_table.LPMLTemperatureMax =
				(uint8_t)((temp >> 8) & 0xff);
		smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
	}

	return 0;
}
/*
 * Zero the LPML temperature-scaler table (feature currently unused).
 */
static int vegam_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
	int i;
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);

	/* Currently not used. Set all to zero. */
	for (i = 0; i < 16; i++)
		smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;

	return 0;
}
/*
 * Program the fuzzy-fan PWM set delta from the fan output sensitivity,
 * falling back to the default sensitivity when the configured value is
 * zero or has its "use default" flag (bit 15) set.
 */
static int vegam_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
{
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);

	/* TO DO move to hwmgr */
	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
			|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
		hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
				hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;

	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
			hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);

	return 0;
}
/*
 * Zero the GNB LPML table (feature currently unused).
 */
static int vegam_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
{
	int i;
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);

	/* Currently not used. Set all to zero. */
	for (i = 0; i < 16; i++)
		smu_data->power_tune_table.GnbLPML[i] = 0;

	return 0;
}
/*
 * Convert the pptable high/low CAC leakage (in 1/100 units) to the SMC's
 * 1/256 fixed-point representation and store them byte-swapped in the
 * power-tune table.
 */
static int vegam_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
	uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;

	/* Integer divide by 100 first, then scale to 1/256 units. */
	hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
	lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);

	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
			CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
			CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);

	return 0;
}
1842 static int vegam_populate_pm_fuses(struct pp_hwmgr
*hwmgr
)
1844 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1845 uint32_t pm_fuse_table_offset
;
1847 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
1848 PHM_PlatformCaps_PowerContainment
)) {
1849 if (smu7_read_smc_sram_dword(hwmgr
,
1850 SMU7_FIRMWARE_HEADER_LOCATION
+
1851 offsetof(SMU75_Firmware_Header
, PmFuseTable
),
1852 &pm_fuse_table_offset
, SMC_RAM_END
))
1853 PP_ASSERT_WITH_CODE(false,
1854 "Attempt to get pm_fuse_table_offset Failed!",
1857 if (vegam_populate_svi_load_line(hwmgr
))
1858 PP_ASSERT_WITH_CODE(false,
1859 "Attempt to populate SviLoadLine Failed!",
1862 if (vegam_populate_tdc_limit(hwmgr
))
1863 PP_ASSERT_WITH_CODE(false,
1864 "Attempt to populate TDCLimit Failed!", return -EINVAL
);
1866 if (vegam_populate_dw8(hwmgr
, pm_fuse_table_offset
))
1867 PP_ASSERT_WITH_CODE(false,
1868 "Attempt to populate TdcWaterfallCtl, "
1869 "LPMLTemperature Min and Max Failed!",
1872 if (0 != vegam_populate_temperature_scaler(hwmgr
))
1873 PP_ASSERT_WITH_CODE(false,
1874 "Attempt to populate LPMLTemperatureScaler Failed!",
1877 if (vegam_populate_fuzzy_fan(hwmgr
))
1878 PP_ASSERT_WITH_CODE(false,
1879 "Attempt to populate Fuzzy Fan Control parameters Failed!",
1882 if (vegam_populate_gnb_lpml(hwmgr
))
1883 PP_ASSERT_WITH_CODE(false,
1884 "Attempt to populate GnbLPML Failed!",
1887 if (vegam_populate_bapm_vddc_base_leakage_sidd(hwmgr
))
1888 PP_ASSERT_WITH_CODE(false,
1889 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
1890 "Sidd Failed!", return -EINVAL
);
1892 if (smu7_copy_bytes_to_smc(hwmgr
, pm_fuse_table_offset
,
1893 (uint8_t *)&smu_data
->power_tune_table
,
1894 (sizeof(struct SMU75_Discrete_PmFuses
) - PMFUSES_AVFSSIZE
),
1896 PP_ASSERT_WITH_CODE(false,
1897 "Attempt to download PmFuseTable Failed!",
1903 static int vegam_enable_reconfig_cus(struct pp_hwmgr
*hwmgr
)
1905 struct amdgpu_device
*adev
= hwmgr
->adev
;
1907 smum_send_msg_to_smc_with_parameter(hwmgr
,
1908 PPSMC_MSG_EnableModeSwitchRLCNotification
,
1909 adev
->gfx
.cu_info
.number
);
1914 static int vegam_init_smc_table(struct pp_hwmgr
*hwmgr
)
1917 struct smu7_hwmgr
*hw_data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
1918 struct vegam_smumgr
*smu_data
= (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
1920 struct phm_ppt_v1_information
*table_info
=
1921 (struct phm_ppt_v1_information
*)(hwmgr
->pptable
);
1922 struct SMU75_Discrete_DpmTable
*table
= &(smu_data
->smc_state_table
);
1924 struct pp_atomctrl_gpio_pin_assignment gpio_pin
;
1925 struct phm_ppt_v1_gpio_table
*gpio_table
=
1926 (struct phm_ppt_v1_gpio_table
*)table_info
->gpio_table
;
1927 pp_atomctrl_clock_dividers_vi dividers
;
1929 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
1930 PHM_PlatformCaps_AutomaticDCTransition
);
1932 vegam_initialize_power_tune_defaults(hwmgr
);
1934 if (SMU7_VOLTAGE_CONTROL_NONE
!= hw_data
->voltage_control
)
1935 vegam_populate_smc_voltage_tables(hwmgr
, table
);
1937 table
->SystemFlags
= 0;
1938 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
1939 PHM_PlatformCaps_AutomaticDCTransition
))
1940 table
->SystemFlags
|= PPSMC_SYSTEMFLAG_GPIO_DC
;
1942 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
1943 PHM_PlatformCaps_StepVddc
))
1944 table
->SystemFlags
|= PPSMC_SYSTEMFLAG_STEPVDDC
;
1946 if (hw_data
->is_memory_gddr5
)
1947 table
->SystemFlags
|= PPSMC_SYSTEMFLAG_GDDR5
;
1949 if (hw_data
->ulv_supported
&& table_info
->us_ulv_voltage_offset
) {
1950 result
= vegam_populate_ulv_state(hwmgr
, table
);
1951 PP_ASSERT_WITH_CODE(!result
,
1952 "Failed to initialize ULV state!", return result
);
1953 cgs_write_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
,
1954 ixCG_ULV_PARAMETER
, SMU7_CGULVPARAMETER_DFLT
);
1957 result
= vegam_populate_smc_link_level(hwmgr
, table
);
1958 PP_ASSERT_WITH_CODE(!result
,
1959 "Failed to initialize Link Level!", return result
);
1961 result
= vegam_populate_all_graphic_levels(hwmgr
);
1962 PP_ASSERT_WITH_CODE(!result
,
1963 "Failed to initialize Graphics Level!", return result
);
1965 result
= vegam_populate_all_memory_levels(hwmgr
);
1966 PP_ASSERT_WITH_CODE(!result
,
1967 "Failed to initialize Memory Level!", return result
);
1969 result
= vegam_populate_smc_acpi_level(hwmgr
, table
);
1970 PP_ASSERT_WITH_CODE(!result
,
1971 "Failed to initialize ACPI Level!", return result
);
1973 result
= vegam_populate_smc_vce_level(hwmgr
, table
);
1974 PP_ASSERT_WITH_CODE(!result
,
1975 "Failed to initialize VCE Level!", return result
);
1977 /* Since only the initial state is completely set up at this point
1978 * (the other states are just copies of the boot state) we only
1979 * need to populate the ARB settings for the initial state.
1981 result
= vegam_program_memory_timing_parameters(hwmgr
);
1982 PP_ASSERT_WITH_CODE(!result
,
1983 "Failed to Write ARB settings for the initial state.", return result
);
1985 result
= vegam_populate_smc_uvd_level(hwmgr
, table
);
1986 PP_ASSERT_WITH_CODE(!result
,
1987 "Failed to initialize UVD Level!", return result
);
1989 result
= vegam_populate_smc_boot_level(hwmgr
, table
);
1990 PP_ASSERT_WITH_CODE(!result
,
1991 "Failed to initialize Boot Level!", return result
);
1993 result
= vegam_populate_smc_initial_state(hwmgr
);
1994 PP_ASSERT_WITH_CODE(!result
,
1995 "Failed to initialize Boot State!", return result
);
1997 result
= vegam_populate_bapm_parameters_in_dpm_table(hwmgr
);
1998 PP_ASSERT_WITH_CODE(!result
,
1999 "Failed to populate BAPM Parameters!", return result
);
2001 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
2002 PHM_PlatformCaps_ClockStretcher
)) {
2003 result
= vegam_populate_clock_stretcher_data_table(hwmgr
);
2004 PP_ASSERT_WITH_CODE(!result
,
2005 "Failed to populate Clock Stretcher Data Table!",
2009 result
= vegam_populate_avfs_parameters(hwmgr
);
2010 PP_ASSERT_WITH_CODE(!result
,
2011 "Failed to populate AVFS Parameters!", return result
;);
2013 table
->CurrSclkPllRange
= 0xff;
2014 table
->GraphicsVoltageChangeEnable
= 1;
2015 table
->GraphicsThermThrottleEnable
= 1;
2016 table
->GraphicsInterval
= 1;
2017 table
->VoltageInterval
= 1;
2018 table
->ThermalInterval
= 1;
2019 table
->TemperatureLimitHigh
=
2020 table_info
->cac_dtp_table
->usTargetOperatingTemp
*
2021 SMU7_Q88_FORMAT_CONVERSION_UNIT
;
2022 table
->TemperatureLimitLow
=
2023 (table_info
->cac_dtp_table
->usTargetOperatingTemp
- 1) *
2024 SMU7_Q88_FORMAT_CONVERSION_UNIT
;
2025 table
->MemoryVoltageChangeEnable
= 1;
2026 table
->MemoryInterval
= 1;
2027 table
->VoltageResponseTime
= 0;
2028 table
->PhaseResponseTime
= 0;
2029 table
->MemoryThermThrottleEnable
= 1;
2031 PP_ASSERT_WITH_CODE(hw_data
->dpm_table
.pcie_speed_table
.count
>= 1,
2032 "There must be 1 or more PCIE levels defined in PPTable.",
2034 table
->PCIeBootLinkLevel
=
2035 hw_data
->dpm_table
.pcie_speed_table
.count
;
2036 table
->PCIeGenInterval
= 1;
2037 table
->VRConfig
= 0;
2039 result
= vegam_populate_vr_config(hwmgr
, table
);
2040 PP_ASSERT_WITH_CODE(!result
,
2041 "Failed to populate VRConfig setting!", return result
);
2043 table
->ThermGpio
= 17;
2044 table
->SclkStepSize
= 0x4000;
2046 if (atomctrl_get_pp_assign_pin(hwmgr
,
2047 VDDC_VRHOT_GPIO_PINID
, &gpio_pin
)) {
2048 table
->VRHotGpio
= gpio_pin
.uc_gpio_pin_bit_shift
;
2051 table_info
->gpio_table
->vrhot_triggered_sclk_dpm_index
;
2053 table
->VRHotGpio
= SMU7_UNUSED_GPIO_PIN
;
2054 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
2055 PHM_PlatformCaps_RegulatorHot
);
2058 if (atomctrl_get_pp_assign_pin(hwmgr
,
2059 PP_AC_DC_SWITCH_GPIO_PINID
, &gpio_pin
)) {
2060 table
->AcDcGpio
= gpio_pin
.uc_gpio_pin_bit_shift
;
2061 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
2062 PHM_PlatformCaps_AutomaticDCTransition
) &&
2063 !smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_UseNewGPIOScheme
))
2064 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
2065 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme
);
2067 table
->AcDcGpio
= SMU7_UNUSED_GPIO_PIN
;
2068 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
2069 PHM_PlatformCaps_AutomaticDCTransition
);
2072 /* Thermal Output GPIO */
2073 if (atomctrl_get_pp_assign_pin(hwmgr
,
2074 THERMAL_INT_OUTPUT_GPIO_PINID
, &gpio_pin
)) {
2075 table
->ThermOutGpio
= gpio_pin
.uc_gpio_pin_bit_shift
;
2077 /* For porlarity read GPIOPAD_A with assigned Gpio pin
2078 * since VBIOS will program this register to set 'inactive state',
2079 * driver can then determine 'active state' from this and
2080 * program SMU with correct polarity
2082 table
->ThermOutPolarity
=
2083 (0 == (cgs_read_register(hwmgr
->device
, mmGPIOPAD_A
) &
2084 (1 << gpio_pin
.uc_gpio_pin_bit_shift
))) ? 1:0;
2085 table
->ThermOutMode
= SMU7_THERM_OUT_MODE_THERM_ONLY
;
2087 /* if required, combine VRHot/PCC with thermal out GPIO */
2088 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
2089 PHM_PlatformCaps_RegulatorHot
) &&
2090 phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
2091 PHM_PlatformCaps_CombinePCCWithThermalSignal
))
2092 table
->ThermOutMode
= SMU7_THERM_OUT_MODE_THERM_VRHOT
;
2094 table
->ThermOutGpio
= 17;
2095 table
->ThermOutPolarity
= 1;
2096 table
->ThermOutMode
= SMU7_THERM_OUT_MODE_DISABLE
;
2099 /* Populate BIF_SCLK levels into SMC DPM table */
2100 for (i
= 0; i
<= hw_data
->dpm_table
.pcie_speed_table
.count
; i
++) {
2101 result
= atomctrl_get_dfs_pll_dividers_vi(hwmgr
,
2102 smu_data
->bif_sclk_table
[i
], ÷rs
);
2103 PP_ASSERT_WITH_CODE(!result
,
2104 "Can not find DFS divide id for Sclk",
2108 table
->Ulv
.BifSclkDfs
=
2109 PP_HOST_TO_SMC_US((uint16_t)(dividers
.pll_post_divider
));
2111 table
->LinkLevel
[i
- 1].BifSclkDfs
=
2112 PP_HOST_TO_SMC_US((uint16_t)(dividers
.pll_post_divider
));
2115 for (i
= 0; i
< SMU75_MAX_ENTRIES_SMIO
; i
++)
2116 table
->Smio
[i
] = PP_HOST_TO_SMC_UL(table
->Smio
[i
]);
2118 CONVERT_FROM_HOST_TO_SMC_UL(table
->SystemFlags
);
2119 CONVERT_FROM_HOST_TO_SMC_UL(table
->VRConfig
);
2120 CONVERT_FROM_HOST_TO_SMC_UL(table
->SmioMask1
);
2121 CONVERT_FROM_HOST_TO_SMC_UL(table
->SmioMask2
);
2122 CONVERT_FROM_HOST_TO_SMC_UL(table
->SclkStepSize
);
2123 CONVERT_FROM_HOST_TO_SMC_UL(table
->CurrSclkPllRange
);
2124 CONVERT_FROM_HOST_TO_SMC_US(table
->TemperatureLimitHigh
);
2125 CONVERT_FROM_HOST_TO_SMC_US(table
->TemperatureLimitLow
);
2126 CONVERT_FROM_HOST_TO_SMC_US(table
->VoltageResponseTime
);
2127 CONVERT_FROM_HOST_TO_SMC_US(table
->PhaseResponseTime
);
2129 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
2130 result
= smu7_copy_bytes_to_smc(hwmgr
,
2131 smu_data
->smu7_data
.dpm_table_start
+
2132 offsetof(SMU75_Discrete_DpmTable
, SystemFlags
),
2133 (uint8_t *)&(table
->SystemFlags
),
2134 sizeof(SMU75_Discrete_DpmTable
) - 3 * sizeof(SMU75_PIDController
),
2136 PP_ASSERT_WITH_CODE(!result
,
2137 "Failed to upload dpm data to SMC memory!", return result
);
2139 result
= vegam_populate_pm_fuses(hwmgr
);
2140 PP_ASSERT_WITH_CODE(!result
,
2141 "Failed to populate PM fuses to SMC memory!", return result
);
2143 result
= vegam_enable_reconfig_cus(hwmgr
);
2144 PP_ASSERT_WITH_CODE(!result
,
2145 "Failed to enable reconfigurable CUs!", return result
);
2150 static uint32_t vegam_get_offsetof(uint32_t type
, uint32_t member
)
2153 case SMU_SoftRegisters
:
2155 case HandshakeDisables
:
2156 return offsetof(SMU75_SoftRegisters
, HandshakeDisables
);
2157 case VoltageChangeTimeout
:
2158 return offsetof(SMU75_SoftRegisters
, VoltageChangeTimeout
);
2159 case AverageGraphicsActivity
:
2160 return offsetof(SMU75_SoftRegisters
, AverageGraphicsActivity
);
2161 case AverageMemoryActivity
:
2162 return offsetof(SMU75_SoftRegisters
, AverageMemoryActivity
);
2164 return offsetof(SMU75_SoftRegisters
, PreVBlankGap
);
2166 return offsetof(SMU75_SoftRegisters
, VBlankTimeout
);
2167 case UcodeLoadStatus
:
2168 return offsetof(SMU75_SoftRegisters
, UcodeLoadStatus
);
2169 case DRAM_LOG_ADDR_H
:
2170 return offsetof(SMU75_SoftRegisters
, DRAM_LOG_ADDR_H
);
2171 case DRAM_LOG_ADDR_L
:
2172 return offsetof(SMU75_SoftRegisters
, DRAM_LOG_ADDR_L
);
2173 case DRAM_LOG_PHY_ADDR_H
:
2174 return offsetof(SMU75_SoftRegisters
, DRAM_LOG_PHY_ADDR_H
);
2175 case DRAM_LOG_PHY_ADDR_L
:
2176 return offsetof(SMU75_SoftRegisters
, DRAM_LOG_PHY_ADDR_L
);
2177 case DRAM_LOG_BUFF_SIZE
:
2178 return offsetof(SMU75_SoftRegisters
, DRAM_LOG_BUFF_SIZE
);
2181 case SMU_Discrete_DpmTable
:
2184 return offsetof(SMU75_Discrete_DpmTable
, UvdBootLevel
);
2186 return offsetof(SMU75_Discrete_DpmTable
, VceBootLevel
);
2187 case LowSclkInterruptThreshold
:
2188 return offsetof(SMU75_Discrete_DpmTable
, LowSclkInterruptThreshold
);
2192 pr_warn("can't get the offset of type %x member %x\n", type
, member
);
2196 static int vegam_program_mem_timing_parameters(struct pp_hwmgr
*hwmgr
)
2198 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
2200 if (data
->need_update_smu7_dpm_table
&
2201 (DPMTABLE_OD_UPDATE_SCLK
+
2202 DPMTABLE_UPDATE_SCLK
+
2203 DPMTABLE_UPDATE_MCLK
))
2204 return vegam_program_memory_timing_parameters(hwmgr
);
2209 static int vegam_update_sclk_threshold(struct pp_hwmgr
*hwmgr
)
2211 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
2212 struct vegam_smumgr
*smu_data
=
2213 (struct vegam_smumgr
*)(hwmgr
->smu_backend
);
2215 uint32_t low_sclk_interrupt_threshold
= 0;
2217 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
2218 PHM_PlatformCaps_SclkThrottleLowNotification
)
2219 && (data
->low_sclk_interrupt_threshold
!= 0)) {
2220 low_sclk_interrupt_threshold
=
2221 data
->low_sclk_interrupt_threshold
;
2223 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold
);
2225 result
= smu7_copy_bytes_to_smc(
2227 smu_data
->smu7_data
.dpm_table_start
+
2228 offsetof(SMU75_Discrete_DpmTable
,
2229 LowSclkInterruptThreshold
),
2230 (uint8_t *)&low_sclk_interrupt_threshold
,
2234 PP_ASSERT_WITH_CODE((result
== 0),
2235 "Failed to update SCLK threshold!", return result
);
2237 result
= vegam_program_mem_timing_parameters(hwmgr
);
2238 PP_ASSERT_WITH_CODE((result
== 0),
2239 "Failed to program memory timing parameters!",
2245 int vegam_thermal_avfs_enable(struct pp_hwmgr
*hwmgr
)
2247 struct smu7_hwmgr
*data
= (struct smu7_hwmgr
*)(hwmgr
->backend
);
2250 if (!hwmgr
->avfs_supported
)
2253 ret
= smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_EnableAvfs
);
2255 if (data
->apply_avfs_cks_off_voltage
)
2256 ret
= smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_ApplyAvfsCksOffVoltage
);
2262 static int vegam_thermal_setup_fan_table(struct pp_hwmgr
*hwmgr
)
2264 PP_ASSERT_WITH_CODE(hwmgr
->thermal_controller
.fanInfo
.bNoFan
,
2265 "VBIOS fan info is not correct!",
2267 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
2268 PHM_PlatformCaps_MicrocodeFanControl
);
2272 const struct pp_smumgr_func vegam_smu_funcs
= {
2273 .name
= "vegam_smu",
2274 .smu_init
= vegam_smu_init
,
2275 .smu_fini
= smu7_smu_fini
,
2276 .start_smu
= vegam_start_smu
,
2277 .check_fw_load_finish
= smu7_check_fw_load_finish
,
2278 .request_smu_load_fw
= smu7_reload_firmware
,
2279 .request_smu_load_specific_fw
= NULL
,
2280 .send_msg_to_smc
= smu7_send_msg_to_smc
,
2281 .send_msg_to_smc_with_parameter
= smu7_send_msg_to_smc_with_parameter
,
2282 .process_firmware_header
= vegam_process_firmware_header
,
2283 .is_dpm_running
= vegam_is_dpm_running
,
2284 .get_mac_definition
= vegam_get_mac_definition
,
2285 .update_smc_table
= vegam_update_smc_table
,
2286 .init_smc_table
= vegam_init_smc_table
,
2287 .get_offsetof
= vegam_get_offsetof
,
2288 .populate_all_graphic_levels
= vegam_populate_all_graphic_levels
,
2289 .populate_all_memory_levels
= vegam_populate_all_memory_levels
,
2290 .update_sclk_threshold
= vegam_update_sclk_threshold
,
2291 .is_hw_avfs_present
= vegam_is_hw_avfs_present
,
2292 .thermal_avfs_enable
= vegam_thermal_avfs_enable
,
2293 .thermal_setup_fan_table
= vegam_thermal_setup_fan_table
,