2 * Copyright 2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <linux/firmware.h>
26 #include <linux/pci.h>
28 #include "amdgpu_smu.h"
29 #include "smu_internal.h"
30 #include "atomfirmware.h"
31 #include "amdgpu_atomfirmware.h"
32 #include "smu_v11_0.h"
33 #include "smu11_driver_if_navi10.h"
34 #include "soc15_common.h"
36 #include "navi10_ppt.h"
37 #include "smu_v11_0_pptable.h"
38 #include "smu_v11_0_ppsmc.h"
39 #include "nbio/nbio_7_4_sh_mask.h"
41 #include "asic_reg/mp/mp_11_0_sh_mask.h"
43 #define FEATURE_MASK(feature) (1ULL << feature)
44 #define SMC_DPM_FEATURE ( \
45 FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
46 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
47 FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT) | \
48 FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
49 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
50 FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) | \
51 FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
52 FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
54 #define MSG_MAP(msg, index) \
55 [SMU_MSG_##msg] = {1, (index)}
57 static struct smu_11_0_cmn2aisc_mapping navi10_message_map
[SMU_MSG_MAX_COUNT
] = {
58 MSG_MAP(TestMessage
, PPSMC_MSG_TestMessage
),
59 MSG_MAP(GetSmuVersion
, PPSMC_MSG_GetSmuVersion
),
60 MSG_MAP(GetDriverIfVersion
, PPSMC_MSG_GetDriverIfVersion
),
61 MSG_MAP(SetAllowedFeaturesMaskLow
, PPSMC_MSG_SetAllowedFeaturesMaskLow
),
62 MSG_MAP(SetAllowedFeaturesMaskHigh
, PPSMC_MSG_SetAllowedFeaturesMaskHigh
),
63 MSG_MAP(EnableAllSmuFeatures
, PPSMC_MSG_EnableAllSmuFeatures
),
64 MSG_MAP(DisableAllSmuFeatures
, PPSMC_MSG_DisableAllSmuFeatures
),
65 MSG_MAP(EnableSmuFeaturesLow
, PPSMC_MSG_EnableSmuFeaturesLow
),
66 MSG_MAP(EnableSmuFeaturesHigh
, PPSMC_MSG_EnableSmuFeaturesHigh
),
67 MSG_MAP(DisableSmuFeaturesLow
, PPSMC_MSG_DisableSmuFeaturesLow
),
68 MSG_MAP(DisableSmuFeaturesHigh
, PPSMC_MSG_DisableSmuFeaturesHigh
),
69 MSG_MAP(GetEnabledSmuFeaturesLow
, PPSMC_MSG_GetEnabledSmuFeaturesLow
),
70 MSG_MAP(GetEnabledSmuFeaturesHigh
, PPSMC_MSG_GetEnabledSmuFeaturesHigh
),
71 MSG_MAP(SetWorkloadMask
, PPSMC_MSG_SetWorkloadMask
),
72 MSG_MAP(SetPptLimit
, PPSMC_MSG_SetPptLimit
),
73 MSG_MAP(SetDriverDramAddrHigh
, PPSMC_MSG_SetDriverDramAddrHigh
),
74 MSG_MAP(SetDriverDramAddrLow
, PPSMC_MSG_SetDriverDramAddrLow
),
75 MSG_MAP(SetToolsDramAddrHigh
, PPSMC_MSG_SetToolsDramAddrHigh
),
76 MSG_MAP(SetToolsDramAddrLow
, PPSMC_MSG_SetToolsDramAddrLow
),
77 MSG_MAP(TransferTableSmu2Dram
, PPSMC_MSG_TransferTableSmu2Dram
),
78 MSG_MAP(TransferTableDram2Smu
, PPSMC_MSG_TransferTableDram2Smu
),
79 MSG_MAP(UseDefaultPPTable
, PPSMC_MSG_UseDefaultPPTable
),
80 MSG_MAP(UseBackupPPTable
, PPSMC_MSG_UseBackupPPTable
),
81 MSG_MAP(RunBtc
, PPSMC_MSG_RunBtc
),
82 MSG_MAP(EnterBaco
, PPSMC_MSG_EnterBaco
),
83 MSG_MAP(SetSoftMinByFreq
, PPSMC_MSG_SetSoftMinByFreq
),
84 MSG_MAP(SetSoftMaxByFreq
, PPSMC_MSG_SetSoftMaxByFreq
),
85 MSG_MAP(SetHardMinByFreq
, PPSMC_MSG_SetHardMinByFreq
),
86 MSG_MAP(SetHardMaxByFreq
, PPSMC_MSG_SetHardMaxByFreq
),
87 MSG_MAP(GetMinDpmFreq
, PPSMC_MSG_GetMinDpmFreq
),
88 MSG_MAP(GetMaxDpmFreq
, PPSMC_MSG_GetMaxDpmFreq
),
89 MSG_MAP(GetDpmFreqByIndex
, PPSMC_MSG_GetDpmFreqByIndex
),
90 MSG_MAP(SetMemoryChannelConfig
, PPSMC_MSG_SetMemoryChannelConfig
),
91 MSG_MAP(SetGeminiMode
, PPSMC_MSG_SetGeminiMode
),
92 MSG_MAP(SetGeminiApertureHigh
, PPSMC_MSG_SetGeminiApertureHigh
),
93 MSG_MAP(SetGeminiApertureLow
, PPSMC_MSG_SetGeminiApertureLow
),
94 MSG_MAP(OverridePcieParameters
, PPSMC_MSG_OverridePcieParameters
),
95 MSG_MAP(SetMinDeepSleepDcefclk
, PPSMC_MSG_SetMinDeepSleepDcefclk
),
96 MSG_MAP(ReenableAcDcInterrupt
, PPSMC_MSG_ReenableAcDcInterrupt
),
97 MSG_MAP(NotifyPowerSource
, PPSMC_MSG_NotifyPowerSource
),
98 MSG_MAP(SetUclkFastSwitch
, PPSMC_MSG_SetUclkFastSwitch
),
99 MSG_MAP(SetVideoFps
, PPSMC_MSG_SetVideoFps
),
100 MSG_MAP(PrepareMp1ForUnload
, PPSMC_MSG_PrepareMp1ForUnload
),
101 MSG_MAP(DramLogSetDramAddrHigh
, PPSMC_MSG_DramLogSetDramAddrHigh
),
102 MSG_MAP(DramLogSetDramAddrLow
, PPSMC_MSG_DramLogSetDramAddrLow
),
103 MSG_MAP(DramLogSetDramSize
, PPSMC_MSG_DramLogSetDramSize
),
104 MSG_MAP(ConfigureGfxDidt
, PPSMC_MSG_ConfigureGfxDidt
),
105 MSG_MAP(NumOfDisplays
, PPSMC_MSG_NumOfDisplays
),
106 MSG_MAP(SetSystemVirtualDramAddrHigh
, PPSMC_MSG_SetSystemVirtualDramAddrHigh
),
107 MSG_MAP(SetSystemVirtualDramAddrLow
, PPSMC_MSG_SetSystemVirtualDramAddrLow
),
108 MSG_MAP(AllowGfxOff
, PPSMC_MSG_AllowGfxOff
),
109 MSG_MAP(DisallowGfxOff
, PPSMC_MSG_DisallowGfxOff
),
110 MSG_MAP(GetPptLimit
, PPSMC_MSG_GetPptLimit
),
111 MSG_MAP(GetDcModeMaxDpmFreq
, PPSMC_MSG_GetDcModeMaxDpmFreq
),
112 MSG_MAP(GetDebugData
, PPSMC_MSG_GetDebugData
),
113 MSG_MAP(ExitBaco
, PPSMC_MSG_ExitBaco
),
114 MSG_MAP(PrepareMp1ForReset
, PPSMC_MSG_PrepareMp1ForReset
),
115 MSG_MAP(PrepareMp1ForShutdown
, PPSMC_MSG_PrepareMp1ForShutdown
),
116 MSG_MAP(PowerUpVcn
, PPSMC_MSG_PowerUpVcn
),
117 MSG_MAP(PowerDownVcn
, PPSMC_MSG_PowerDownVcn
),
118 MSG_MAP(PowerUpJpeg
, PPSMC_MSG_PowerUpJpeg
),
119 MSG_MAP(PowerDownJpeg
, PPSMC_MSG_PowerDownJpeg
),
120 MSG_MAP(BacoAudioD3PME
, PPSMC_MSG_BacoAudioD3PME
),
121 MSG_MAP(ArmD3
, PPSMC_MSG_ArmD3
),
124 static struct smu_11_0_cmn2aisc_mapping navi10_clk_map
[SMU_CLK_COUNT
] = {
125 CLK_MAP(GFXCLK
, PPCLK_GFXCLK
),
126 CLK_MAP(SCLK
, PPCLK_GFXCLK
),
127 CLK_MAP(SOCCLK
, PPCLK_SOCCLK
),
128 CLK_MAP(FCLK
, PPCLK_SOCCLK
),
129 CLK_MAP(UCLK
, PPCLK_UCLK
),
130 CLK_MAP(MCLK
, PPCLK_UCLK
),
131 CLK_MAP(DCLK
, PPCLK_DCLK
),
132 CLK_MAP(VCLK
, PPCLK_VCLK
),
133 CLK_MAP(DCEFCLK
, PPCLK_DCEFCLK
),
134 CLK_MAP(DISPCLK
, PPCLK_DISPCLK
),
135 CLK_MAP(PIXCLK
, PPCLK_PIXCLK
),
136 CLK_MAP(PHYCLK
, PPCLK_PHYCLK
),
139 static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map
[SMU_FEATURE_COUNT
] = {
140 FEA_MAP(DPM_PREFETCHER
),
142 FEA_MAP(DPM_GFX_PACE
),
147 FEA_MAP(DPM_DCEFCLK
),
148 FEA_MAP(MEM_VDDCI_SCALING
),
149 FEA_MAP(MEM_MVDD_SCALING
),
162 FEA_MAP(RSMU_SMN_CG
),
172 FEA_MAP(FAN_CONTROL
),
176 FEA_MAP(LED_DISPLAY
),
178 FEA_MAP(OUT_OF_BAND_MONITOR
),
179 FEA_MAP(TEMP_DEPENDENT_VMIN
),
185 static struct smu_11_0_cmn2aisc_mapping navi10_table_map
[SMU_TABLE_COUNT
] = {
189 TAB_MAP(AVFS_PSM_DEBUG
),
190 TAB_MAP(AVFS_FUSE_OVERRIDE
),
191 TAB_MAP(PMSTATUSLOG
),
192 TAB_MAP(SMU_METRICS
),
193 TAB_MAP(DRIVER_SMU_CONFIG
),
194 TAB_MAP(ACTIVITY_MONITOR_COEFF
),
196 TAB_MAP(I2C_COMMANDS
),
200 static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map
[SMU_POWER_SOURCE_COUNT
] = {
205 static struct smu_11_0_cmn2aisc_mapping navi10_workload_map
[PP_SMC_POWER_PROFILE_COUNT
] = {
206 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT
, WORKLOAD_PPLIB_DEFAULT_BIT
),
207 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D
, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT
),
208 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING
, WORKLOAD_PPLIB_POWER_SAVING_BIT
),
209 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO
, WORKLOAD_PPLIB_VIDEO_BIT
),
210 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR
, WORKLOAD_PPLIB_VR_BIT
),
211 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE
, WORKLOAD_PPLIB_COMPUTE_BIT
),
212 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM
, WORKLOAD_PPLIB_CUSTOM_BIT
),
215 static int navi10_get_smu_msg_index(struct smu_context
*smc
, uint32_t index
)
217 struct smu_11_0_cmn2aisc_mapping mapping
;
219 if (index
>= SMU_MSG_MAX_COUNT
)
222 mapping
= navi10_message_map
[index
];
223 if (!(mapping
.valid_mapping
)) {
227 return mapping
.map_to
;
230 static int navi10_get_smu_clk_index(struct smu_context
*smc
, uint32_t index
)
232 struct smu_11_0_cmn2aisc_mapping mapping
;
234 if (index
>= SMU_CLK_COUNT
)
237 mapping
= navi10_clk_map
[index
];
238 if (!(mapping
.valid_mapping
)) {
242 return mapping
.map_to
;
245 static int navi10_get_smu_feature_index(struct smu_context
*smc
, uint32_t index
)
247 struct smu_11_0_cmn2aisc_mapping mapping
;
249 if (index
>= SMU_FEATURE_COUNT
)
252 mapping
= navi10_feature_mask_map
[index
];
253 if (!(mapping
.valid_mapping
)) {
257 return mapping
.map_to
;
260 static int navi10_get_smu_table_index(struct smu_context
*smc
, uint32_t index
)
262 struct smu_11_0_cmn2aisc_mapping mapping
;
264 if (index
>= SMU_TABLE_COUNT
)
267 mapping
= navi10_table_map
[index
];
268 if (!(mapping
.valid_mapping
)) {
272 return mapping
.map_to
;
275 static int navi10_get_pwr_src_index(struct smu_context
*smc
, uint32_t index
)
277 struct smu_11_0_cmn2aisc_mapping mapping
;
279 if (index
>= SMU_POWER_SOURCE_COUNT
)
282 mapping
= navi10_pwr_src_map
[index
];
283 if (!(mapping
.valid_mapping
)) {
287 return mapping
.map_to
;
291 static int navi10_get_workload_type(struct smu_context
*smu
, enum PP_SMC_POWER_PROFILE profile
)
293 struct smu_11_0_cmn2aisc_mapping mapping
;
295 if (profile
> PP_SMC_POWER_PROFILE_CUSTOM
)
298 mapping
= navi10_workload_map
[profile
];
299 if (!(mapping
.valid_mapping
)) {
303 return mapping
.map_to
;
306 static bool is_asic_secure(struct smu_context
*smu
)
308 struct amdgpu_device
*adev
= smu
->adev
;
309 bool is_secure
= true;
310 uint32_t mp0_fw_intf
;
312 mp0_fw_intf
= RREG32_PCIE(MP0_Public
|
313 (smnMP0_FW_INTF
& 0xffffffff));
315 if (!(mp0_fw_intf
& (1 << 19)))
322 navi10_get_allowed_feature_mask(struct smu_context
*smu
,
323 uint32_t *feature_mask
, uint32_t num
)
325 struct amdgpu_device
*adev
= smu
->adev
;
330 memset(feature_mask
, 0, sizeof(uint32_t) * num
);
332 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT
)
333 | FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT
)
334 | FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT
)
335 | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT
)
336 | FEATURE_MASK(FEATURE_PPT_BIT
)
337 | FEATURE_MASK(FEATURE_TDC_BIT
)
338 | FEATURE_MASK(FEATURE_GFX_EDC_BIT
)
339 | FEATURE_MASK(FEATURE_APCC_PLUS_BIT
)
340 | FEATURE_MASK(FEATURE_VR0HOT_BIT
)
341 | FEATURE_MASK(FEATURE_FAN_CONTROL_BIT
)
342 | FEATURE_MASK(FEATURE_THERMAL_BIT
)
343 | FEATURE_MASK(FEATURE_LED_DISPLAY_BIT
)
344 | FEATURE_MASK(FEATURE_DS_LCLK_BIT
)
345 | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT
)
346 | FEATURE_MASK(FEATURE_FW_DSTATE_BIT
)
347 | FEATURE_MASK(FEATURE_BACO_BIT
)
348 | FEATURE_MASK(FEATURE_ACDC_BIT
)
349 | FEATURE_MASK(FEATURE_GFX_SS_BIT
)
350 | FEATURE_MASK(FEATURE_APCC_DFLL_BIT
)
351 | FEATURE_MASK(FEATURE_FW_CTF_BIT
)
352 | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT
);
354 if (adev
->pm
.pp_feature
& PP_SOCCLK_DPM_MASK
)
355 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT
);
357 if (adev
->pm
.pp_feature
& PP_SCLK_DPM_MASK
)
358 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT
);
360 if (adev
->pm
.pp_feature
& PP_PCIE_DPM_MASK
)
361 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_DPM_LINK_BIT
);
363 if (adev
->pm
.pp_feature
& PP_DCEFCLK_DPM_MASK
)
364 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT
);
366 if (adev
->pm
.pp_feature
& PP_MCLK_DPM_MASK
)
367 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_DPM_UCLK_BIT
)
368 | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT
)
369 | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT
);
371 if (adev
->pm
.pp_feature
& PP_ULV_MASK
)
372 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_GFX_ULV_BIT
);
374 if (adev
->pm
.pp_feature
& PP_SCLK_DEEP_SLEEP_MASK
)
375 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT
);
377 if (adev
->pm
.pp_feature
& PP_GFXOFF_MASK
)
378 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_GFXOFF_BIT
);
380 if (smu
->adev
->pg_flags
& AMD_PG_SUPPORT_MMHUB
)
381 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_MMHUB_PG_BIT
);
383 if (smu
->adev
->pg_flags
& AMD_PG_SUPPORT_ATHUB
)
384 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_ATHUB_PG_BIT
);
386 if (smu
->adev
->pg_flags
& AMD_PG_SUPPORT_VCN
)
387 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_VCN_PG_BIT
);
389 if (smu
->adev
->pg_flags
& AMD_PG_SUPPORT_JPEG
)
390 *(uint64_t *)feature_mask
|= FEATURE_MASK(FEATURE_JPEG_PG_BIT
);
392 /* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
393 if (is_asic_secure(smu
)) {
394 /* only for navi10 A0 */
395 if ((adev
->asic_type
== CHIP_NAVI10
) &&
396 (adev
->rev_id
== 0)) {
397 *(uint64_t *)feature_mask
&=
398 ~(FEATURE_MASK(FEATURE_DPM_UCLK_BIT
)
399 | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT
)
400 | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT
));
401 *(uint64_t *)feature_mask
&=
402 ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT
);
/*
 * Validate the vBIOS powerplay table. No navi10-specific checks are
 * performed; always reports success.
 * NOTE(review): body reconstructed — the original's lines were dropped by
 * the extraction; upstream navi10_ppt.c returns 0 unconditionally here.
 */
static int navi10_check_powerplay_table(struct smu_context *smu)
{
	return 0;
}
414 static int navi10_append_powerplay_table(struct smu_context
*smu
)
416 struct amdgpu_device
*adev
= smu
->adev
;
417 struct smu_table_context
*table_context
= &smu
->smu_table
;
418 PPTable_t
*smc_pptable
= table_context
->driver_pptable
;
419 struct atom_smc_dpm_info_v4_5
*smc_dpm_table
;
422 index
= get_index_into_master_table(atom_master_list_of_data_tables_v2_1
,
425 ret
= smu_get_atom_data_table(smu
, index
, NULL
, NULL
, NULL
,
426 (uint8_t **)&smc_dpm_table
);
430 memcpy(smc_pptable
->I2cControllers
, smc_dpm_table
->I2cControllers
,
431 sizeof(I2cControllerConfig_t
) * NUM_I2C_CONTROLLERS
);
433 /* SVI2 Board Parameters */
434 smc_pptable
->MaxVoltageStepGfx
= smc_dpm_table
->MaxVoltageStepGfx
;
435 smc_pptable
->MaxVoltageStepSoc
= smc_dpm_table
->MaxVoltageStepSoc
;
436 smc_pptable
->VddGfxVrMapping
= smc_dpm_table
->VddGfxVrMapping
;
437 smc_pptable
->VddSocVrMapping
= smc_dpm_table
->VddSocVrMapping
;
438 smc_pptable
->VddMem0VrMapping
= smc_dpm_table
->VddMem0VrMapping
;
439 smc_pptable
->VddMem1VrMapping
= smc_dpm_table
->VddMem1VrMapping
;
440 smc_pptable
->GfxUlvPhaseSheddingMask
= smc_dpm_table
->GfxUlvPhaseSheddingMask
;
441 smc_pptable
->SocUlvPhaseSheddingMask
= smc_dpm_table
->SocUlvPhaseSheddingMask
;
442 smc_pptable
->ExternalSensorPresent
= smc_dpm_table
->ExternalSensorPresent
;
443 smc_pptable
->Padding8_V
= smc_dpm_table
->Padding8_V
;
445 /* Telemetry Settings */
446 smc_pptable
->GfxMaxCurrent
= smc_dpm_table
->GfxMaxCurrent
;
447 smc_pptable
->GfxOffset
= smc_dpm_table
->GfxOffset
;
448 smc_pptable
->Padding_TelemetryGfx
= smc_dpm_table
->Padding_TelemetryGfx
;
449 smc_pptable
->SocMaxCurrent
= smc_dpm_table
->SocMaxCurrent
;
450 smc_pptable
->SocOffset
= smc_dpm_table
->SocOffset
;
451 smc_pptable
->Padding_TelemetrySoc
= smc_dpm_table
->Padding_TelemetrySoc
;
452 smc_pptable
->Mem0MaxCurrent
= smc_dpm_table
->Mem0MaxCurrent
;
453 smc_pptable
->Mem0Offset
= smc_dpm_table
->Mem0Offset
;
454 smc_pptable
->Padding_TelemetryMem0
= smc_dpm_table
->Padding_TelemetryMem0
;
455 smc_pptable
->Mem1MaxCurrent
= smc_dpm_table
->Mem1MaxCurrent
;
456 smc_pptable
->Mem1Offset
= smc_dpm_table
->Mem1Offset
;
457 smc_pptable
->Padding_TelemetryMem1
= smc_dpm_table
->Padding_TelemetryMem1
;
460 smc_pptable
->AcDcGpio
= smc_dpm_table
->AcDcGpio
;
461 smc_pptable
->AcDcPolarity
= smc_dpm_table
->AcDcPolarity
;
462 smc_pptable
->VR0HotGpio
= smc_dpm_table
->VR0HotGpio
;
463 smc_pptable
->VR0HotPolarity
= smc_dpm_table
->VR0HotPolarity
;
464 smc_pptable
->VR1HotGpio
= smc_dpm_table
->VR1HotGpio
;
465 smc_pptable
->VR1HotPolarity
= smc_dpm_table
->VR1HotPolarity
;
466 smc_pptable
->GthrGpio
= smc_dpm_table
->GthrGpio
;
467 smc_pptable
->GthrPolarity
= smc_dpm_table
->GthrPolarity
;
469 /* LED Display Settings */
470 smc_pptable
->LedPin0
= smc_dpm_table
->LedPin0
;
471 smc_pptable
->LedPin1
= smc_dpm_table
->LedPin1
;
472 smc_pptable
->LedPin2
= smc_dpm_table
->LedPin2
;
473 smc_pptable
->padding8_4
= smc_dpm_table
->padding8_4
;
475 /* GFXCLK PLL Spread Spectrum */
476 smc_pptable
->PllGfxclkSpreadEnabled
= smc_dpm_table
->PllGfxclkSpreadEnabled
;
477 smc_pptable
->PllGfxclkSpreadPercent
= smc_dpm_table
->PllGfxclkSpreadPercent
;
478 smc_pptable
->PllGfxclkSpreadFreq
= smc_dpm_table
->PllGfxclkSpreadFreq
;
480 /* GFXCLK DFLL Spread Spectrum */
481 smc_pptable
->DfllGfxclkSpreadEnabled
= smc_dpm_table
->DfllGfxclkSpreadEnabled
;
482 smc_pptable
->DfllGfxclkSpreadPercent
= smc_dpm_table
->DfllGfxclkSpreadPercent
;
483 smc_pptable
->DfllGfxclkSpreadFreq
= smc_dpm_table
->DfllGfxclkSpreadFreq
;
485 /* UCLK Spread Spectrum */
486 smc_pptable
->UclkSpreadEnabled
= smc_dpm_table
->UclkSpreadEnabled
;
487 smc_pptable
->UclkSpreadPercent
= smc_dpm_table
->UclkSpreadPercent
;
488 smc_pptable
->UclkSpreadFreq
= smc_dpm_table
->UclkSpreadFreq
;
490 /* SOCCLK Spread Spectrum */
491 smc_pptable
->SoclkSpreadEnabled
= smc_dpm_table
->SoclkSpreadEnabled
;
492 smc_pptable
->SocclkSpreadPercent
= smc_dpm_table
->SocclkSpreadPercent
;
493 smc_pptable
->SocclkSpreadFreq
= smc_dpm_table
->SocclkSpreadFreq
;
495 /* Total board power */
496 smc_pptable
->TotalBoardPower
= smc_dpm_table
->TotalBoardPower
;
497 smc_pptable
->BoardPadding
= smc_dpm_table
->BoardPadding
;
499 /* Mvdd Svi2 Div Ratio Setting */
500 smc_pptable
->MvddRatio
= smc_dpm_table
->MvddRatio
;
502 if (adev
->pm
.pp_feature
& PP_GFXOFF_MASK
) {
503 /* TODO: remove it once SMU fw fix it */
504 smc_pptable
->DebugOverrides
|= DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN
;
510 static int navi10_store_powerplay_table(struct smu_context
*smu
)
512 struct smu_11_0_powerplay_table
*powerplay_table
= NULL
;
513 struct smu_table_context
*table_context
= &smu
->smu_table
;
514 struct smu_baco_context
*smu_baco
= &smu
->smu_baco
;
516 if (!table_context
->power_play_table
)
519 powerplay_table
= table_context
->power_play_table
;
521 memcpy(table_context
->driver_pptable
, &powerplay_table
->smc_pptable
,
524 table_context
->thermal_controller_type
= powerplay_table
->thermal_controller_type
;
526 mutex_lock(&smu_baco
->mutex
);
527 if (powerplay_table
->platform_caps
& SMU_11_0_PP_PLATFORM_CAP_BACO
||
528 powerplay_table
->platform_caps
& SMU_11_0_PP_PLATFORM_CAP_MACO
)
529 smu_baco
->platform_support
= true;
530 mutex_unlock(&smu_baco
->mutex
);
535 static int navi10_tables_init(struct smu_context
*smu
, struct smu_table
*tables
)
537 struct smu_table_context
*smu_table
= &smu
->smu_table
;
539 SMU_TABLE_INIT(tables
, SMU_TABLE_PPTABLE
, sizeof(PPTable_t
),
540 PAGE_SIZE
, AMDGPU_GEM_DOMAIN_VRAM
);
541 SMU_TABLE_INIT(tables
, SMU_TABLE_WATERMARKS
, sizeof(Watermarks_t
),
542 PAGE_SIZE
, AMDGPU_GEM_DOMAIN_VRAM
);
543 SMU_TABLE_INIT(tables
, SMU_TABLE_SMU_METRICS
, sizeof(SmuMetrics_t
),
544 PAGE_SIZE
, AMDGPU_GEM_DOMAIN_VRAM
);
545 SMU_TABLE_INIT(tables
, SMU_TABLE_OVERDRIVE
, sizeof(OverDriveTable_t
),
546 PAGE_SIZE
, AMDGPU_GEM_DOMAIN_VRAM
);
547 SMU_TABLE_INIT(tables
, SMU_TABLE_PMSTATUSLOG
, SMU11_TOOL_SIZE
,
548 PAGE_SIZE
, AMDGPU_GEM_DOMAIN_VRAM
);
549 SMU_TABLE_INIT(tables
, SMU_TABLE_ACTIVITY_MONITOR_COEFF
,
550 sizeof(DpmActivityMonitorCoeffInt_t
), PAGE_SIZE
,
551 AMDGPU_GEM_DOMAIN_VRAM
);
553 smu_table
->metrics_table
= kzalloc(sizeof(SmuMetrics_t
), GFP_KERNEL
);
554 if (!smu_table
->metrics_table
)
556 smu_table
->metrics_time
= 0;
558 smu_table
->watermarks_table
= kzalloc(sizeof(Watermarks_t
), GFP_KERNEL
);
559 if (!smu_table
->watermarks_table
)
565 static int navi10_get_metrics_table(struct smu_context
*smu
,
566 SmuMetrics_t
*metrics_table
)
568 struct smu_table_context
*smu_table
= &smu
->smu_table
;
571 mutex_lock(&smu
->metrics_lock
);
572 if (!smu_table
->metrics_time
|| time_after(jiffies
, smu_table
->metrics_time
+ msecs_to_jiffies(100))) {
573 ret
= smu_update_table(smu
, SMU_TABLE_SMU_METRICS
, 0,
574 (void *)smu_table
->metrics_table
, false);
576 pr_info("Failed to export SMU metrics table!\n");
577 mutex_unlock(&smu
->metrics_lock
);
580 smu_table
->metrics_time
= jiffies
;
583 memcpy(metrics_table
, smu_table
->metrics_table
, sizeof(SmuMetrics_t
));
584 mutex_unlock(&smu
->metrics_lock
);
589 static int navi10_allocate_dpm_context(struct smu_context
*smu
)
591 struct smu_dpm_context
*smu_dpm
= &smu
->smu_dpm
;
593 if (smu_dpm
->dpm_context
)
596 smu_dpm
->dpm_context
= kzalloc(sizeof(struct smu_11_0_dpm_context
),
598 if (!smu_dpm
->dpm_context
)
601 smu_dpm
->dpm_context_size
= sizeof(struct smu_11_0_dpm_context
);
606 static int navi10_set_default_dpm_table(struct smu_context
*smu
)
608 struct smu_dpm_context
*smu_dpm
= &smu
->smu_dpm
;
609 struct smu_table_context
*table_context
= &smu
->smu_table
;
610 struct smu_11_0_dpm_context
*dpm_context
= smu_dpm
->dpm_context
;
611 PPTable_t
*driver_ppt
= NULL
;
614 driver_ppt
= table_context
->driver_pptable
;
616 dpm_context
->dpm_tables
.soc_table
.min
= driver_ppt
->FreqTableSocclk
[0];
617 dpm_context
->dpm_tables
.soc_table
.max
= driver_ppt
->FreqTableSocclk
[NUM_SOCCLK_DPM_LEVELS
- 1];
619 dpm_context
->dpm_tables
.gfx_table
.min
= driver_ppt
->FreqTableGfx
[0];
620 dpm_context
->dpm_tables
.gfx_table
.max
= driver_ppt
->FreqTableGfx
[NUM_GFXCLK_DPM_LEVELS
- 1];
622 dpm_context
->dpm_tables
.uclk_table
.min
= driver_ppt
->FreqTableUclk
[0];
623 dpm_context
->dpm_tables
.uclk_table
.max
= driver_ppt
->FreqTableUclk
[NUM_UCLK_DPM_LEVELS
- 1];
625 dpm_context
->dpm_tables
.vclk_table
.min
= driver_ppt
->FreqTableVclk
[0];
626 dpm_context
->dpm_tables
.vclk_table
.max
= driver_ppt
->FreqTableVclk
[NUM_VCLK_DPM_LEVELS
- 1];
628 dpm_context
->dpm_tables
.dclk_table
.min
= driver_ppt
->FreqTableDclk
[0];
629 dpm_context
->dpm_tables
.dclk_table
.max
= driver_ppt
->FreqTableDclk
[NUM_DCLK_DPM_LEVELS
- 1];
631 dpm_context
->dpm_tables
.dcef_table
.min
= driver_ppt
->FreqTableDcefclk
[0];
632 dpm_context
->dpm_tables
.dcef_table
.max
= driver_ppt
->FreqTableDcefclk
[NUM_DCEFCLK_DPM_LEVELS
- 1];
634 dpm_context
->dpm_tables
.pixel_table
.min
= driver_ppt
->FreqTablePixclk
[0];
635 dpm_context
->dpm_tables
.pixel_table
.max
= driver_ppt
->FreqTablePixclk
[NUM_PIXCLK_DPM_LEVELS
- 1];
637 dpm_context
->dpm_tables
.display_table
.min
= driver_ppt
->FreqTableDispclk
[0];
638 dpm_context
->dpm_tables
.display_table
.max
= driver_ppt
->FreqTableDispclk
[NUM_DISPCLK_DPM_LEVELS
- 1];
640 dpm_context
->dpm_tables
.phy_table
.min
= driver_ppt
->FreqTablePhyclk
[0];
641 dpm_context
->dpm_tables
.phy_table
.max
= driver_ppt
->FreqTablePhyclk
[NUM_PHYCLK_DPM_LEVELS
- 1];
643 for (i
= 0; i
< MAX_PCIE_CONF
; i
++) {
644 dpm_context
->dpm_tables
.pcie_table
.pcie_gen
[i
] = driver_ppt
->PcieGenSpeed
[i
];
645 dpm_context
->dpm_tables
.pcie_table
.pcie_lane
[i
] = driver_ppt
->PcieLaneCount
[i
];
651 static int navi10_dpm_set_uvd_enable(struct smu_context
*smu
, bool enable
)
653 struct smu_power_context
*smu_power
= &smu
->smu_power
;
654 struct smu_power_gate
*power_gate
= &smu_power
->power_gate
;
658 /* vcn dpm on is a prerequisite for vcn power gate messages */
659 if (smu_feature_is_enabled(smu
, SMU_FEATURE_VCN_PG_BIT
)) {
660 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_PowerUpVcn
, 1);
664 power_gate
->vcn_gated
= false;
666 if (smu_feature_is_enabled(smu
, SMU_FEATURE_VCN_PG_BIT
)) {
667 ret
= smu_send_smc_msg(smu
, SMU_MSG_PowerDownVcn
);
671 power_gate
->vcn_gated
= true;
677 static int navi10_dpm_set_jpeg_enable(struct smu_context
*smu
, bool enable
)
679 struct smu_power_context
*smu_power
= &smu
->smu_power
;
680 struct smu_power_gate
*power_gate
= &smu_power
->power_gate
;
684 if (smu_feature_is_enabled(smu
, SMU_FEATURE_JPEG_PG_BIT
)) {
685 ret
= smu_send_smc_msg(smu
, SMU_MSG_PowerUpJpeg
);
689 power_gate
->jpeg_gated
= false;
691 if (smu_feature_is_enabled(smu
, SMU_FEATURE_JPEG_PG_BIT
)) {
692 ret
= smu_send_smc_msg(smu
, SMU_MSG_PowerDownJpeg
);
696 power_gate
->jpeg_gated
= true;
702 static int navi10_get_current_clk_freq_by_table(struct smu_context
*smu
,
703 enum smu_clk_type clk_type
,
706 int ret
= 0, clk_id
= 0;
707 SmuMetrics_t metrics
;
709 ret
= navi10_get_metrics_table(smu
, &metrics
);
713 clk_id
= smu_clk_get_index(smu
, clk_type
);
717 *value
= metrics
.CurrClock
[clk_id
];
722 static bool navi10_is_support_fine_grained_dpm(struct smu_context
*smu
, enum smu_clk_type clk_type
)
724 PPTable_t
*pptable
= smu
->smu_table
.driver_pptable
;
725 DpmDescriptor_t
*dpm_desc
= NULL
;
726 uint32_t clk_index
= 0;
728 clk_index
= smu_clk_get_index(smu
, clk_type
);
729 dpm_desc
= &pptable
->DpmDescriptor
[clk_index
];
731 /* 0 - Fine grained DPM, 1 - Discrete DPM */
732 return dpm_desc
->SnapToDiscrete
== 0 ? true : false;
735 static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table
*od_table
, enum SMU_11_0_ODFEATURE_ID feature
)
737 return od_table
->cap
[feature
];
741 static int navi10_print_clk_levels(struct smu_context
*smu
,
742 enum smu_clk_type clk_type
, char *buf
)
744 uint16_t *curve_settings
;
745 int i
, size
= 0, ret
= 0;
746 uint32_t cur_value
= 0, value
= 0, count
= 0;
747 uint32_t freq_values
[3] = {0};
748 uint32_t mark_index
= 0;
749 struct smu_table_context
*table_context
= &smu
->smu_table
;
750 uint32_t gen_speed
, lane_width
;
751 struct smu_dpm_context
*smu_dpm
= &smu
->smu_dpm
;
752 struct smu_11_0_dpm_context
*dpm_context
= smu_dpm
->dpm_context
;
753 struct amdgpu_device
*adev
= smu
->adev
;
754 PPTable_t
*pptable
= (PPTable_t
*)table_context
->driver_pptable
;
755 OverDriveTable_t
*od_table
=
756 (OverDriveTable_t
*)table_context
->overdrive_table
;
757 struct smu_11_0_overdrive_table
*od_settings
= smu
->od_settings
;
767 ret
= smu_get_current_clk_freq(smu
, clk_type
, &cur_value
);
772 cur_value
= cur_value
/ 100;
774 ret
= smu_get_dpm_level_count(smu
, clk_type
, &count
);
778 if (!navi10_is_support_fine_grained_dpm(smu
, clk_type
)) {
779 for (i
= 0; i
< count
; i
++) {
780 ret
= smu_get_dpm_freq_by_index(smu
, clk_type
, i
, &value
);
784 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n", i
, value
,
785 cur_value
== value
? "*" : "");
788 ret
= smu_get_dpm_freq_by_index(smu
, clk_type
, 0, &freq_values
[0]);
791 ret
= smu_get_dpm_freq_by_index(smu
, clk_type
, count
- 1, &freq_values
[2]);
795 freq_values
[1] = cur_value
;
796 mark_index
= cur_value
== freq_values
[0] ? 0 :
797 cur_value
== freq_values
[2] ? 2 : 1;
799 freq_values
[1] = (freq_values
[0] + freq_values
[2]) / 2;
801 for (i
= 0; i
< 3; i
++) {
802 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n", i
, freq_values
[i
],
803 i
== mark_index
? "*" : "");
809 gen_speed
= (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL
) &
810 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK
)
811 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT
;
812 lane_width
= (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL
) &
813 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK
)
814 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT
;
815 for (i
= 0; i
< NUM_LINK_LEVELS
; i
++)
816 size
+= sprintf(buf
+ size
, "%d: %s %s %dMhz %s\n", i
,
817 (dpm_context
->dpm_tables
.pcie_table
.pcie_gen
[i
] == 0) ? "2.5GT/s," :
818 (dpm_context
->dpm_tables
.pcie_table
.pcie_gen
[i
] == 1) ? "5.0GT/s," :
819 (dpm_context
->dpm_tables
.pcie_table
.pcie_gen
[i
] == 2) ? "8.0GT/s," :
820 (dpm_context
->dpm_tables
.pcie_table
.pcie_gen
[i
] == 3) ? "16.0GT/s," : "",
821 (dpm_context
->dpm_tables
.pcie_table
.pcie_lane
[i
] == 1) ? "x1" :
822 (dpm_context
->dpm_tables
.pcie_table
.pcie_lane
[i
] == 2) ? "x2" :
823 (dpm_context
->dpm_tables
.pcie_table
.pcie_lane
[i
] == 3) ? "x4" :
824 (dpm_context
->dpm_tables
.pcie_table
.pcie_lane
[i
] == 4) ? "x8" :
825 (dpm_context
->dpm_tables
.pcie_table
.pcie_lane
[i
] == 5) ? "x12" :
826 (dpm_context
->dpm_tables
.pcie_table
.pcie_lane
[i
] == 6) ? "x16" : "",
827 pptable
->LclkFreq
[i
],
828 (gen_speed
== dpm_context
->dpm_tables
.pcie_table
.pcie_gen
[i
]) &&
829 (lane_width
== dpm_context
->dpm_tables
.pcie_table
.pcie_lane
[i
]) ?
833 if (!smu
->od_enabled
|| !od_table
|| !od_settings
)
835 if (!navi10_od_feature_is_supported(od_settings
, SMU_11_0_ODFEATURE_GFXCLK_LIMITS
))
837 size
+= sprintf(buf
+ size
, "OD_SCLK:\n");
838 size
+= sprintf(buf
+ size
, "0: %uMhz\n1: %uMhz\n", od_table
->GfxclkFmin
, od_table
->GfxclkFmax
);
841 if (!smu
->od_enabled
|| !od_table
|| !od_settings
)
843 if (!navi10_od_feature_is_supported(od_settings
, SMU_11_0_ODFEATURE_UCLK_MAX
))
845 size
+= sprintf(buf
+ size
, "OD_MCLK:\n");
846 size
+= sprintf(buf
+ size
, "0: %uMHz\n", od_table
->UclkFmax
);
848 case SMU_OD_VDDC_CURVE
:
849 if (!smu
->od_enabled
|| !od_table
|| !od_settings
)
851 if (!navi10_od_feature_is_supported(od_settings
, SMU_11_0_ODFEATURE_GFXCLK_CURVE
))
853 size
+= sprintf(buf
+ size
, "OD_VDDC_CURVE:\n");
854 for (i
= 0; i
< 3; i
++) {
857 curve_settings
= &od_table
->GfxclkFreq1
;
860 curve_settings
= &od_table
->GfxclkFreq2
;
863 curve_settings
= &od_table
->GfxclkFreq3
;
868 size
+= sprintf(buf
+ size
, "%d: %uMHz @ %umV\n", i
, curve_settings
[0], curve_settings
[1] / NAVI10_VOLTAGE_SCALE
);
878 static int navi10_force_clk_levels(struct smu_context
*smu
,
879 enum smu_clk_type clk_type
, uint32_t mask
)
882 int ret
= 0, size
= 0;
883 uint32_t soft_min_level
= 0, soft_max_level
= 0, min_freq
= 0, max_freq
= 0;
885 soft_min_level
= mask
? (ffs(mask
) - 1) : 0;
886 soft_max_level
= mask
? (fls(mask
) - 1) : 0;
896 /* There is only 2 levels for fine grained DPM */
897 if (navi10_is_support_fine_grained_dpm(smu
, clk_type
)) {
898 soft_max_level
= (soft_max_level
>= 1 ? 1 : 0);
899 soft_min_level
= (soft_min_level
>= 1 ? 1 : 0);
902 ret
= smu_get_dpm_freq_by_index(smu
, clk_type
, soft_min_level
, &min_freq
);
906 ret
= smu_get_dpm_freq_by_index(smu
, clk_type
, soft_max_level
, &max_freq
);
910 ret
= smu_set_soft_freq_range(smu
, clk_type
, min_freq
, max_freq
);
921 static int navi10_populate_umd_state_clk(struct smu_context
*smu
)
924 uint32_t min_sclk_freq
= 0, min_mclk_freq
= 0;
926 ret
= smu_get_dpm_freq_range(smu
, SMU_SCLK
, &min_sclk_freq
, NULL
, false);
930 smu
->pstate_sclk
= min_sclk_freq
* 100;
932 ret
= smu_get_dpm_freq_range(smu
, SMU_MCLK
, &min_mclk_freq
, NULL
, false);
936 smu
->pstate_mclk
= min_mclk_freq
* 100;
941 static int navi10_get_clock_by_type_with_latency(struct smu_context
*smu
,
942 enum smu_clk_type clk_type
,
943 struct pp_clock_levels_with_latency
*clocks
)
946 uint32_t level_count
= 0, freq
= 0;
952 ret
= smu_get_dpm_level_count(smu
, clk_type
, &level_count
);
956 level_count
= min(level_count
, (uint32_t)MAX_NUM_CLOCKS
);
957 clocks
->num_levels
= level_count
;
959 for (i
= 0; i
< level_count
; i
++) {
960 ret
= smu_get_dpm_freq_by_index(smu
, clk_type
, i
, &freq
);
964 clocks
->data
[i
].clocks_in_khz
= freq
* 1000;
965 clocks
->data
[i
].latency_in_us
= 0;
975 static int navi10_pre_display_config_changed(struct smu_context
*smu
)
978 uint32_t max_freq
= 0;
980 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_NumOfDisplays
, 0);
984 if (smu_feature_is_enabled(smu
, SMU_FEATURE_DPM_UCLK_BIT
)) {
985 ret
= smu_get_dpm_freq_range(smu
, SMU_UCLK
, NULL
, &max_freq
, false);
988 ret
= smu_set_hard_freq_range(smu
, SMU_UCLK
, 0, max_freq
);
996 static int navi10_display_config_changed(struct smu_context
*smu
)
1000 if ((smu
->watermarks_bitmap
& WATERMARKS_EXIST
) &&
1001 !(smu
->watermarks_bitmap
& WATERMARKS_LOADED
)) {
1002 ret
= smu_write_watermarks_table(smu
);
1006 smu
->watermarks_bitmap
|= WATERMARKS_LOADED
;
1009 if ((smu
->watermarks_bitmap
& WATERMARKS_EXIST
) &&
1010 smu_feature_is_supported(smu
, SMU_FEATURE_DPM_DCEFCLK_BIT
) &&
1011 smu_feature_is_supported(smu
, SMU_FEATURE_DPM_SOCCLK_BIT
)) {
1012 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_NumOfDisplays
,
1013 smu
->display_config
->num_display
);
1021 static int navi10_force_dpm_limit_value(struct smu_context
*smu
, bool highest
)
1024 uint32_t min_freq
, max_freq
, force_freq
;
1025 enum smu_clk_type clk_type
;
1027 enum smu_clk_type clks
[] = {
1033 for (i
= 0; i
< ARRAY_SIZE(clks
); i
++) {
1035 ret
= smu_get_dpm_freq_range(smu
, clk_type
, &min_freq
, &max_freq
, false);
1039 force_freq
= highest
? max_freq
: min_freq
;
1040 ret
= smu_set_soft_freq_range(smu
, clk_type
, force_freq
, force_freq
);
1048 static int navi10_unforce_dpm_levels(struct smu_context
*smu
)
1051 uint32_t min_freq
, max_freq
;
1052 enum smu_clk_type clk_type
;
1054 enum smu_clk_type clks
[] = {
1060 for (i
= 0; i
< ARRAY_SIZE(clks
); i
++) {
1062 ret
= smu_get_dpm_freq_range(smu
, clk_type
, &min_freq
, &max_freq
, false);
1066 ret
= smu_set_soft_freq_range(smu
, clk_type
, min_freq
, max_freq
);
1074 static int navi10_get_gpu_power(struct smu_context
*smu
, uint32_t *value
)
1077 SmuMetrics_t metrics
;
1082 ret
= navi10_get_metrics_table(smu
, &metrics
);
1086 *value
= metrics
.AverageSocketPower
<< 8;
1091 static int navi10_get_current_activity_percent(struct smu_context
*smu
,
1092 enum amd_pp_sensors sensor
,
1096 SmuMetrics_t metrics
;
1101 ret
= navi10_get_metrics_table(smu
, &metrics
);
1106 case AMDGPU_PP_SENSOR_GPU_LOAD
:
1107 *value
= metrics
.AverageGfxActivity
;
1109 case AMDGPU_PP_SENSOR_MEM_LOAD
:
1110 *value
= metrics
.AverageUclkActivity
;
1113 pr_err("Invalid sensor for retrieving clock activity\n");
1120 static bool navi10_is_dpm_running(struct smu_context
*smu
)
1123 uint32_t feature_mask
[2];
1124 unsigned long feature_enabled
;
1125 ret
= smu_feature_get_enabled_mask(smu
, feature_mask
, 2);
1126 feature_enabled
= (unsigned long)((uint64_t)feature_mask
[0] |
1127 ((uint64_t)feature_mask
[1] << 32));
1128 return !!(feature_enabled
& SMC_DPM_FEATURE
);
1131 static int navi10_get_fan_speed_rpm(struct smu_context
*smu
,
1134 SmuMetrics_t metrics
;
1140 ret
= navi10_get_metrics_table(smu
, &metrics
);
1144 *speed
= metrics
.CurrFanSpeed
;
1149 static int navi10_get_fan_speed_percent(struct smu_context
*smu
,
1153 uint32_t percent
= 0;
1154 uint32_t current_rpm
;
1155 PPTable_t
*pptable
= smu
->smu_table
.driver_pptable
;
1157 ret
= navi10_get_fan_speed_rpm(smu
, ¤t_rpm
);
1161 percent
= current_rpm
* 100 / pptable
->FanMaximumRpm
;
1162 *speed
= percent
> 100 ? 100 : percent
;
1167 static int navi10_get_power_profile_mode(struct smu_context
*smu
, char *buf
)
1169 DpmActivityMonitorCoeffInt_t activity_monitor
;
1170 uint32_t i
, size
= 0;
1171 int16_t workload_type
= 0;
1172 static const char *profile_name
[] = {
1180 static const char *title
[] = {
1181 "PROFILE_INDEX(NAME)",
1185 "MinActiveFreqType",
1190 "PD_Data_error_coeff",
1191 "PD_Data_error_rate_coeff"};
1197 size
+= sprintf(buf
+ size
, "%16s %s %s %s %s %s %s %s %s %s %s\n",
1198 title
[0], title
[1], title
[2], title
[3], title
[4], title
[5],
1199 title
[6], title
[7], title
[8], title
[9], title
[10]);
1201 for (i
= 0; i
<= PP_SMC_POWER_PROFILE_CUSTOM
; i
++) {
1202 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1203 workload_type
= smu_workload_get_type(smu
, i
);
1204 if (workload_type
< 0)
1207 result
= smu_update_table(smu
,
1208 SMU_TABLE_ACTIVITY_MONITOR_COEFF
, workload_type
,
1209 (void *)(&activity_monitor
), false);
1211 pr_err("[%s] Failed to get activity monitor!", __func__
);
1215 size
+= sprintf(buf
+ size
, "%2d %14s%s:\n",
1216 i
, profile_name
[i
], (i
== smu
->power_profile_mode
) ? "*" : " ");
1218 size
+= sprintf(buf
+ size
, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1222 activity_monitor
.Gfx_FPS
,
1223 activity_monitor
.Gfx_MinFreqStep
,
1224 activity_monitor
.Gfx_MinActiveFreqType
,
1225 activity_monitor
.Gfx_MinActiveFreq
,
1226 activity_monitor
.Gfx_BoosterFreqType
,
1227 activity_monitor
.Gfx_BoosterFreq
,
1228 activity_monitor
.Gfx_PD_Data_limit_c
,
1229 activity_monitor
.Gfx_PD_Data_error_coeff
,
1230 activity_monitor
.Gfx_PD_Data_error_rate_coeff
);
1232 size
+= sprintf(buf
+ size
, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1236 activity_monitor
.Soc_FPS
,
1237 activity_monitor
.Soc_MinFreqStep
,
1238 activity_monitor
.Soc_MinActiveFreqType
,
1239 activity_monitor
.Soc_MinActiveFreq
,
1240 activity_monitor
.Soc_BoosterFreqType
,
1241 activity_monitor
.Soc_BoosterFreq
,
1242 activity_monitor
.Soc_PD_Data_limit_c
,
1243 activity_monitor
.Soc_PD_Data_error_coeff
,
1244 activity_monitor
.Soc_PD_Data_error_rate_coeff
);
1246 size
+= sprintf(buf
+ size
, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1250 activity_monitor
.Mem_FPS
,
1251 activity_monitor
.Mem_MinFreqStep
,
1252 activity_monitor
.Mem_MinActiveFreqType
,
1253 activity_monitor
.Mem_MinActiveFreq
,
1254 activity_monitor
.Mem_BoosterFreqType
,
1255 activity_monitor
.Mem_BoosterFreq
,
1256 activity_monitor
.Mem_PD_Data_limit_c
,
1257 activity_monitor
.Mem_PD_Data_error_coeff
,
1258 activity_monitor
.Mem_PD_Data_error_rate_coeff
);
1264 static int navi10_set_power_profile_mode(struct smu_context
*smu
, long *input
, uint32_t size
)
1266 DpmActivityMonitorCoeffInt_t activity_monitor
;
1267 int workload_type
, ret
= 0;
1269 smu
->power_profile_mode
= input
[size
];
1271 if (smu
->power_profile_mode
> PP_SMC_POWER_PROFILE_CUSTOM
) {
1272 pr_err("Invalid power profile mode %d\n", smu
->power_profile_mode
);
1276 if (smu
->power_profile_mode
== PP_SMC_POWER_PROFILE_CUSTOM
) {
1280 ret
= smu_update_table(smu
,
1281 SMU_TABLE_ACTIVITY_MONITOR_COEFF
, WORKLOAD_PPLIB_CUSTOM_BIT
,
1282 (void *)(&activity_monitor
), false);
1284 pr_err("[%s] Failed to get activity monitor!", __func__
);
1289 case 0: /* Gfxclk */
1290 activity_monitor
.Gfx_FPS
= input
[1];
1291 activity_monitor
.Gfx_MinFreqStep
= input
[2];
1292 activity_monitor
.Gfx_MinActiveFreqType
= input
[3];
1293 activity_monitor
.Gfx_MinActiveFreq
= input
[4];
1294 activity_monitor
.Gfx_BoosterFreqType
= input
[5];
1295 activity_monitor
.Gfx_BoosterFreq
= input
[6];
1296 activity_monitor
.Gfx_PD_Data_limit_c
= input
[7];
1297 activity_monitor
.Gfx_PD_Data_error_coeff
= input
[8];
1298 activity_monitor
.Gfx_PD_Data_error_rate_coeff
= input
[9];
1300 case 1: /* Socclk */
1301 activity_monitor
.Soc_FPS
= input
[1];
1302 activity_monitor
.Soc_MinFreqStep
= input
[2];
1303 activity_monitor
.Soc_MinActiveFreqType
= input
[3];
1304 activity_monitor
.Soc_MinActiveFreq
= input
[4];
1305 activity_monitor
.Soc_BoosterFreqType
= input
[5];
1306 activity_monitor
.Soc_BoosterFreq
= input
[6];
1307 activity_monitor
.Soc_PD_Data_limit_c
= input
[7];
1308 activity_monitor
.Soc_PD_Data_error_coeff
= input
[8];
1309 activity_monitor
.Soc_PD_Data_error_rate_coeff
= input
[9];
1312 activity_monitor
.Mem_FPS
= input
[1];
1313 activity_monitor
.Mem_MinFreqStep
= input
[2];
1314 activity_monitor
.Mem_MinActiveFreqType
= input
[3];
1315 activity_monitor
.Mem_MinActiveFreq
= input
[4];
1316 activity_monitor
.Mem_BoosterFreqType
= input
[5];
1317 activity_monitor
.Mem_BoosterFreq
= input
[6];
1318 activity_monitor
.Mem_PD_Data_limit_c
= input
[7];
1319 activity_monitor
.Mem_PD_Data_error_coeff
= input
[8];
1320 activity_monitor
.Mem_PD_Data_error_rate_coeff
= input
[9];
1324 ret
= smu_update_table(smu
,
1325 SMU_TABLE_ACTIVITY_MONITOR_COEFF
, WORKLOAD_PPLIB_CUSTOM_BIT
,
1326 (void *)(&activity_monitor
), true);
1328 pr_err("[%s] Failed to set activity monitor!", __func__
);
1333 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1334 workload_type
= smu_workload_get_type(smu
, smu
->power_profile_mode
);
1335 if (workload_type
< 0)
1337 smu_send_smc_msg_with_param(smu
, SMU_MSG_SetWorkloadMask
,
1338 1 << workload_type
);
1343 static int navi10_get_profiling_clk_mask(struct smu_context
*smu
,
1344 enum amd_dpm_forced_level level
,
1345 uint32_t *sclk_mask
,
1346 uint32_t *mclk_mask
,
1350 uint32_t level_count
= 0;
1352 if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
) {
1355 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
) {
1358 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
1360 ret
= smu_get_dpm_level_count(smu
, SMU_SCLK
, &level_count
);
1363 *sclk_mask
= level_count
- 1;
1367 ret
= smu_get_dpm_level_count(smu
, SMU_MCLK
, &level_count
);
1370 *mclk_mask
= level_count
- 1;
1374 ret
= smu_get_dpm_level_count(smu
, SMU_SOCCLK
, &level_count
);
1377 *soc_mask
= level_count
- 1;
1384 static int navi10_notify_smc_display_config(struct smu_context
*smu
)
1386 struct smu_clocks min_clocks
= {0};
1387 struct pp_display_clock_request clock_req
;
1390 min_clocks
.dcef_clock
= smu
->display_config
->min_dcef_set_clk
;
1391 min_clocks
.dcef_clock_in_sr
= smu
->display_config
->min_dcef_deep_sleep_set_clk
;
1392 min_clocks
.memory_clock
= smu
->display_config
->min_mem_set_clock
;
1394 if (smu_feature_is_supported(smu
, SMU_FEATURE_DPM_DCEFCLK_BIT
)) {
1395 clock_req
.clock_type
= amd_pp_dcef_clock
;
1396 clock_req
.clock_freq_in_khz
= min_clocks
.dcef_clock
* 10;
1398 ret
= smu_v11_0_display_clock_voltage_request(smu
, &clock_req
);
1400 if (smu_feature_is_supported(smu
, SMU_FEATURE_DS_DCEFCLK_BIT
)) {
1401 ret
= smu_send_smc_msg_with_param(smu
,
1402 SMU_MSG_SetMinDeepSleepDcefclk
,
1403 min_clocks
.dcef_clock_in_sr
/100);
1405 pr_err("Attempt to set divider for DCEFCLK Failed!");
1410 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
1414 if (smu_feature_is_enabled(smu
, SMU_FEATURE_DPM_UCLK_BIT
)) {
1415 ret
= smu_set_hard_freq_range(smu
, SMU_UCLK
, min_clocks
.memory_clock
/100, 0);
1417 pr_err("[%s] Set hard min uclk failed!", __func__
);
1425 static int navi10_set_watermarks_table(struct smu_context
*smu
,
1426 void *watermarks
, struct
1427 dm_pp_wm_sets_with_clock_ranges_soc15
1431 Watermarks_t
*table
= watermarks
;
1433 if (!table
|| !clock_ranges
)
1436 if (clock_ranges
->num_wm_dmif_sets
> 4 ||
1437 clock_ranges
->num_wm_mcif_sets
> 4)
1440 for (i
= 0; i
< clock_ranges
->num_wm_dmif_sets
; i
++) {
1441 table
->WatermarkRow
[1][i
].MinClock
=
1442 cpu_to_le16((uint16_t)
1443 (clock_ranges
->wm_dmif_clocks_ranges
[i
].wm_min_dcfclk_clk_in_khz
/
1445 table
->WatermarkRow
[1][i
].MaxClock
=
1446 cpu_to_le16((uint16_t)
1447 (clock_ranges
->wm_dmif_clocks_ranges
[i
].wm_max_dcfclk_clk_in_khz
/
1449 table
->WatermarkRow
[1][i
].MinUclk
=
1450 cpu_to_le16((uint16_t)
1451 (clock_ranges
->wm_dmif_clocks_ranges
[i
].wm_min_mem_clk_in_khz
/
1453 table
->WatermarkRow
[1][i
].MaxUclk
=
1454 cpu_to_le16((uint16_t)
1455 (clock_ranges
->wm_dmif_clocks_ranges
[i
].wm_max_mem_clk_in_khz
/
1457 table
->WatermarkRow
[1][i
].WmSetting
= (uint8_t)
1458 clock_ranges
->wm_dmif_clocks_ranges
[i
].wm_set_id
;
1461 for (i
= 0; i
< clock_ranges
->num_wm_mcif_sets
; i
++) {
1462 table
->WatermarkRow
[0][i
].MinClock
=
1463 cpu_to_le16((uint16_t)
1464 (clock_ranges
->wm_mcif_clocks_ranges
[i
].wm_min_socclk_clk_in_khz
/
1466 table
->WatermarkRow
[0][i
].MaxClock
=
1467 cpu_to_le16((uint16_t)
1468 (clock_ranges
->wm_mcif_clocks_ranges
[i
].wm_max_socclk_clk_in_khz
/
1470 table
->WatermarkRow
[0][i
].MinUclk
=
1471 cpu_to_le16((uint16_t)
1472 (clock_ranges
->wm_mcif_clocks_ranges
[i
].wm_min_mem_clk_in_khz
/
1474 table
->WatermarkRow
[0][i
].MaxUclk
=
1475 cpu_to_le16((uint16_t)
1476 (clock_ranges
->wm_mcif_clocks_ranges
[i
].wm_max_mem_clk_in_khz
/
1478 table
->WatermarkRow
[0][i
].WmSetting
= (uint8_t)
1479 clock_ranges
->wm_mcif_clocks_ranges
[i
].wm_set_id
;
1485 static int navi10_thermal_get_temperature(struct smu_context
*smu
,
1486 enum amd_pp_sensors sensor
,
1489 SmuMetrics_t metrics
;
1495 ret
= navi10_get_metrics_table(smu
, &metrics
);
1500 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP
:
1501 *value
= metrics
.TemperatureHotspot
*
1502 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1504 case AMDGPU_PP_SENSOR_EDGE_TEMP
:
1505 *value
= metrics
.TemperatureEdge
*
1506 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1508 case AMDGPU_PP_SENSOR_MEM_TEMP
:
1509 *value
= metrics
.TemperatureMem
*
1510 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1513 pr_err("Invalid sensor for retrieving temp\n");
1520 static int navi10_read_sensor(struct smu_context
*smu
,
1521 enum amd_pp_sensors sensor
,
1522 void *data
, uint32_t *size
)
1525 struct smu_table_context
*table_context
= &smu
->smu_table
;
1526 PPTable_t
*pptable
= table_context
->driver_pptable
;
1531 mutex_lock(&smu
->sensor_lock
);
1533 case AMDGPU_PP_SENSOR_MAX_FAN_RPM
:
1534 *(uint32_t *)data
= pptable
->FanMaximumRpm
;
1537 case AMDGPU_PP_SENSOR_MEM_LOAD
:
1538 case AMDGPU_PP_SENSOR_GPU_LOAD
:
1539 ret
= navi10_get_current_activity_percent(smu
, sensor
, (uint32_t *)data
);
1542 case AMDGPU_PP_SENSOR_GPU_POWER
:
1543 ret
= navi10_get_gpu_power(smu
, (uint32_t *)data
);
1546 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP
:
1547 case AMDGPU_PP_SENSOR_EDGE_TEMP
:
1548 case AMDGPU_PP_SENSOR_MEM_TEMP
:
1549 ret
= navi10_thermal_get_temperature(smu
, sensor
, (uint32_t *)data
);
1553 ret
= smu_v11_0_read_sensor(smu
, sensor
, data
, size
);
1555 mutex_unlock(&smu
->sensor_lock
);
1560 static int navi10_get_uclk_dpm_states(struct smu_context
*smu
, uint32_t *clocks_in_khz
, uint32_t *num_states
)
1562 uint32_t num_discrete_levels
= 0;
1563 uint16_t *dpm_levels
= NULL
;
1565 struct smu_table_context
*table_context
= &smu
->smu_table
;
1566 PPTable_t
*driver_ppt
= NULL
;
1568 if (!clocks_in_khz
|| !num_states
|| !table_context
->driver_pptable
)
1571 driver_ppt
= table_context
->driver_pptable
;
1572 num_discrete_levels
= driver_ppt
->DpmDescriptor
[PPCLK_UCLK
].NumDiscreteLevels
;
1573 dpm_levels
= driver_ppt
->FreqTableUclk
;
1575 if (num_discrete_levels
== 0 || dpm_levels
== NULL
)
1578 *num_states
= num_discrete_levels
;
1579 for (i
= 0; i
< num_discrete_levels
; i
++) {
1580 /* convert to khz */
1581 *clocks_in_khz
= (*dpm_levels
) * 1000;
1589 static int navi10_set_performance_level(struct smu_context
*smu
,
1590 enum amd_dpm_forced_level level
);
1592 static int navi10_set_standard_performance_level(struct smu_context
*smu
)
1594 struct amdgpu_device
*adev
= smu
->adev
;
1596 uint32_t sclk_freq
= 0, uclk_freq
= 0;
1598 switch (adev
->asic_type
) {
1600 sclk_freq
= NAVI10_UMD_PSTATE_PROFILING_GFXCLK
;
1601 uclk_freq
= NAVI10_UMD_PSTATE_PROFILING_MEMCLK
;
1604 sclk_freq
= NAVI14_UMD_PSTATE_PROFILING_GFXCLK
;
1605 uclk_freq
= NAVI14_UMD_PSTATE_PROFILING_MEMCLK
;
1608 /* by default, this is same as auto performance level */
1609 return navi10_set_performance_level(smu
, AMD_DPM_FORCED_LEVEL_AUTO
);
1612 ret
= smu_set_soft_freq_range(smu
, SMU_SCLK
, sclk_freq
, sclk_freq
);
1615 ret
= smu_set_soft_freq_range(smu
, SMU_UCLK
, uclk_freq
, uclk_freq
);
1622 static int navi10_set_peak_performance_level(struct smu_context
*smu
)
1624 struct amdgpu_device
*adev
= smu
->adev
;
1626 uint32_t sclk_freq
= 0, uclk_freq
= 0;
1628 switch (adev
->asic_type
) {
1630 switch (adev
->pdev
->revision
) {
1631 case 0xf0: /* XTX */
1633 sclk_freq
= NAVI10_PEAK_SCLK_XTX
;
1637 sclk_freq
= NAVI10_PEAK_SCLK_XT
;
1640 sclk_freq
= NAVI10_PEAK_SCLK_XL
;
1645 switch (adev
->pdev
->revision
) {
1648 sclk_freq
= NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK
;
1650 case 0xc1: /* XTM */
1652 sclk_freq
= NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK
;
1654 case 0xc3: /* XLM */
1656 sclk_freq
= NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK
;
1658 case 0xc5: /* XTX */
1660 sclk_freq
= NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK
;
1663 sclk_freq
= NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK
;
1668 sclk_freq
= NAVI12_UMD_PSTATE_PEAK_GFXCLK
;
1671 ret
= smu_get_dpm_level_range(smu
, SMU_SCLK
, NULL
, &sclk_freq
);
1676 ret
= smu_get_dpm_level_range(smu
, SMU_UCLK
, NULL
, &uclk_freq
);
1680 ret
= smu_set_soft_freq_range(smu
, SMU_SCLK
, sclk_freq
, sclk_freq
);
1683 ret
= smu_set_soft_freq_range(smu
, SMU_UCLK
, uclk_freq
, uclk_freq
);
1690 static int navi10_set_performance_level(struct smu_context
*smu
,
1691 enum amd_dpm_forced_level level
)
1694 uint32_t sclk_mask
, mclk_mask
, soc_mask
;
1697 case AMD_DPM_FORCED_LEVEL_HIGH
:
1698 ret
= smu_force_dpm_limit_value(smu
, true);
1700 case AMD_DPM_FORCED_LEVEL_LOW
:
1701 ret
= smu_force_dpm_limit_value(smu
, false);
1703 case AMD_DPM_FORCED_LEVEL_AUTO
:
1704 ret
= smu_unforce_dpm_levels(smu
);
1706 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
:
1707 ret
= navi10_set_standard_performance_level(smu
);
1709 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
:
1710 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
:
1711 ret
= smu_get_profiling_clk_mask(smu
, level
,
1717 smu_force_clk_levels(smu
, SMU_SCLK
, 1 << sclk_mask
, false);
1718 smu_force_clk_levels(smu
, SMU_MCLK
, 1 << mclk_mask
, false);
1719 smu_force_clk_levels(smu
, SMU_SOCCLK
, 1 << soc_mask
, false);
1721 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
:
1722 ret
= navi10_set_peak_performance_level(smu
);
1724 case AMD_DPM_FORCED_LEVEL_MANUAL
:
1725 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
:
1732 static int navi10_get_thermal_temperature_range(struct smu_context
*smu
,
1733 struct smu_temperature_range
*range
)
1735 struct smu_table_context
*table_context
= &smu
->smu_table
;
1736 struct smu_11_0_powerplay_table
*powerplay_table
= table_context
->power_play_table
;
1738 if (!range
|| !powerplay_table
)
1741 range
->max
= powerplay_table
->software_shutdown_temp
*
1742 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1747 static int navi10_display_disable_memory_clock_switch(struct smu_context
*smu
,
1748 bool disable_memory_clock_switch
)
1751 struct smu_11_0_max_sustainable_clocks
*max_sustainable_clocks
=
1752 (struct smu_11_0_max_sustainable_clocks
*)
1753 smu
->smu_table
.max_sustainable_clocks
;
1754 uint32_t min_memory_clock
= smu
->hard_min_uclk_req_from_dal
;
1755 uint32_t max_memory_clock
= max_sustainable_clocks
->uclock
;
1757 if(smu
->disable_uclk_switch
== disable_memory_clock_switch
)
1760 if(disable_memory_clock_switch
)
1761 ret
= smu_set_hard_freq_range(smu
, SMU_UCLK
, max_memory_clock
, 0);
1763 ret
= smu_set_hard_freq_range(smu
, SMU_UCLK
, min_memory_clock
, 0);
1766 smu
->disable_uclk_switch
= disable_memory_clock_switch
;
1771 static uint32_t navi10_get_pptable_power_limit(struct smu_context
*smu
)
1773 PPTable_t
*pptable
= smu
->smu_table
.driver_pptable
;
1774 return pptable
->SocketPowerLimitAc
[PPT_THROTTLER_PPT0
];
1777 static int navi10_get_power_limit(struct smu_context
*smu
,
1781 PPTable_t
*pptable
= smu
->smu_table
.driver_pptable
;
1782 uint32_t asic_default_power_limit
= 0;
1786 if (!smu
->power_limit
) {
1787 if (smu_feature_is_enabled(smu
, SMU_FEATURE_PPT_BIT
)) {
1788 power_src
= smu_power_get_index(smu
, SMU_POWER_SOURCE_AC
);
1792 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_GetPptLimit
,
1795 pr_err("[%s] get PPT limit failed!", __func__
);
1798 smu_read_smc_arg(smu
, &asic_default_power_limit
);
1800 /* the last hope to figure out the ppt limit */
1802 pr_err("Cannot get PPT limit due to pptable missing!");
1805 asic_default_power_limit
=
1806 pptable
->SocketPowerLimitAc
[PPT_THROTTLER_PPT0
];
1809 smu
->power_limit
= asic_default_power_limit
;
1813 *limit
= smu_v11_0_get_max_power_limit(smu
);
1815 *limit
= smu
->power_limit
;
1820 static int navi10_update_pcie_parameters(struct smu_context
*smu
,
1821 uint32_t pcie_gen_cap
,
1822 uint32_t pcie_width_cap
)
1824 PPTable_t
*pptable
= smu
->smu_table
.driver_pptable
;
1826 uint32_t smu_pcie_arg
;
1828 struct smu_dpm_context
*smu_dpm
= &smu
->smu_dpm
;
1829 struct smu_11_0_dpm_context
*dpm_context
= smu_dpm
->dpm_context
;
1831 for (i
= 0; i
< NUM_LINK_LEVELS
; i
++) {
1832 smu_pcie_arg
= (i
<< 16) |
1833 ((pptable
->PcieGenSpeed
[i
] <= pcie_gen_cap
) ? (pptable
->PcieGenSpeed
[i
] << 8) :
1834 (pcie_gen_cap
<< 8)) | ((pptable
->PcieLaneCount
[i
] <= pcie_width_cap
) ?
1835 pptable
->PcieLaneCount
[i
] : pcie_width_cap
);
1836 ret
= smu_send_smc_msg_with_param(smu
,
1837 SMU_MSG_OverridePcieParameters
,
1843 if (pptable
->PcieGenSpeed
[i
] > pcie_gen_cap
)
1844 dpm_context
->dpm_tables
.pcie_table
.pcie_gen
[i
] = pcie_gen_cap
;
1845 if (pptable
->PcieLaneCount
[i
] > pcie_width_cap
)
1846 dpm_context
->dpm_tables
.pcie_table
.pcie_lane
[i
] = pcie_width_cap
;
1852 static inline void navi10_dump_od_table(OverDriveTable_t
*od_table
) {
1853 pr_debug("OD: Gfxclk: (%d, %d)\n", od_table
->GfxclkFmin
, od_table
->GfxclkFmax
);
1854 pr_debug("OD: Gfx1: (%d, %d)\n", od_table
->GfxclkFreq1
, od_table
->GfxclkVolt1
);
1855 pr_debug("OD: Gfx2: (%d, %d)\n", od_table
->GfxclkFreq2
, od_table
->GfxclkVolt2
);
1856 pr_debug("OD: Gfx3: (%d, %d)\n", od_table
->GfxclkFreq3
, od_table
->GfxclkVolt3
);
1857 pr_debug("OD: UclkFmax: %d\n", od_table
->UclkFmax
);
1858 pr_debug("OD: OverDrivePct: %d\n", od_table
->OverDrivePct
);
1861 static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table
*od_table
, enum SMU_11_0_ODSETTING_ID setting
, uint32_t value
)
1863 if (value
< od_table
->min
[setting
]) {
1864 pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting
, value
, od_table
->min
[setting
]);
1867 if (value
> od_table
->max
[setting
]) {
1868 pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting
, value
, od_table
->max
[setting
]);
1874 static int navi10_setup_od_limits(struct smu_context
*smu
) {
1875 struct smu_11_0_overdrive_table
*overdrive_table
= NULL
;
1876 struct smu_11_0_powerplay_table
*powerplay_table
= NULL
;
1878 if (!smu
->smu_table
.power_play_table
) {
1879 pr_err("powerplay table uninitialized!\n");
1882 powerplay_table
= (struct smu_11_0_powerplay_table
*)smu
->smu_table
.power_play_table
;
1883 overdrive_table
= &powerplay_table
->overdrive_table
;
1884 if (!smu
->od_settings
) {
1885 smu
->od_settings
= kmemdup(overdrive_table
, sizeof(struct smu_11_0_overdrive_table
), GFP_KERNEL
);
1887 memcpy(smu
->od_settings
, overdrive_table
, sizeof(struct smu_11_0_overdrive_table
));
1892 static int navi10_set_default_od_settings(struct smu_context
*smu
, bool initialize
) {
1893 OverDriveTable_t
*od_table
;
1896 ret
= smu_v11_0_set_default_od_settings(smu
, initialize
, sizeof(OverDriveTable_t
));
1901 ret
= navi10_setup_od_limits(smu
);
1903 pr_err("Failed to retrieve board OD limits\n");
1909 od_table
= (OverDriveTable_t
*)smu
->smu_table
.overdrive_table
;
1911 navi10_dump_od_table(od_table
);
1917 static int navi10_od_edit_dpm_table(struct smu_context
*smu
, enum PP_OD_DPM_TABLE_COMMAND type
, long input
[], uint32_t size
) {
1920 struct smu_table_context
*table_context
= &smu
->smu_table
;
1921 OverDriveTable_t
*od_table
;
1922 struct smu_11_0_overdrive_table
*od_settings
;
1923 enum SMU_11_0_ODSETTING_ID freq_setting
, voltage_setting
;
1924 uint16_t *freq_ptr
, *voltage_ptr
;
1925 od_table
= (OverDriveTable_t
*)table_context
->overdrive_table
;
1927 if (!smu
->od_enabled
) {
1928 pr_warn("OverDrive is not enabled!\n");
1932 if (!smu
->od_settings
) {
1933 pr_err("OD board limits are not set!\n");
1937 od_settings
= smu
->od_settings
;
1940 case PP_OD_EDIT_SCLK_VDDC_TABLE
:
1941 if (!navi10_od_feature_is_supported(od_settings
, SMU_11_0_ODFEATURE_GFXCLK_LIMITS
)) {
1942 pr_warn("GFXCLK_LIMITS not supported!\n");
1945 if (!table_context
->overdrive_table
) {
1946 pr_err("Overdrive is not initialized\n");
1949 for (i
= 0; i
< size
; i
+= 2) {
1951 pr_info("invalid number of input parameters %d\n", size
);
1956 freq_setting
= SMU_11_0_ODSETTING_GFXCLKFMIN
;
1957 freq_ptr
= &od_table
->GfxclkFmin
;
1958 if (input
[i
+ 1] > od_table
->GfxclkFmax
) {
1959 pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
1961 od_table
->GfxclkFmin
);
1966 freq_setting
= SMU_11_0_ODSETTING_GFXCLKFMAX
;
1967 freq_ptr
= &od_table
->GfxclkFmax
;
1968 if (input
[i
+ 1] < od_table
->GfxclkFmin
) {
1969 pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
1971 od_table
->GfxclkFmax
);
1976 pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input
[i
]);
1977 pr_info("Supported indices: [0:min,1:max]\n");
1980 ret
= navi10_od_setting_check_range(od_settings
, freq_setting
, input
[i
+ 1]);
1983 *freq_ptr
= input
[i
+ 1];
1986 case PP_OD_EDIT_MCLK_VDDC_TABLE
:
1987 if (!navi10_od_feature_is_supported(od_settings
, SMU_11_0_ODFEATURE_UCLK_MAX
)) {
1988 pr_warn("UCLK_MAX not supported!\n");
1992 pr_info("invalid number of parameters: %d\n", size
);
1995 if (input
[0] != 1) {
1996 pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input
[0]);
1997 pr_info("Supported indices: [1:max]\n");
2000 ret
= navi10_od_setting_check_range(od_settings
, SMU_11_0_ODSETTING_UCLKFMAX
, input
[1]);
2003 od_table
->UclkFmax
= input
[1];
2005 case PP_OD_COMMIT_DPM_TABLE
:
2006 navi10_dump_od_table(od_table
);
2007 ret
= smu_update_table(smu
, SMU_TABLE_OVERDRIVE
, 0, (void *)od_table
, true);
2009 pr_err("Failed to import overdrive table!\n");
2012 // no lock needed because smu_od_edit_dpm_table has it
2013 ret
= smu_handle_task(smu
, smu
->smu_dpm
.dpm_level
,
2014 AMD_PP_TASK_READJUST_POWER_STATE
,
2020 case PP_OD_EDIT_VDDC_CURVE
:
2021 if (!navi10_od_feature_is_supported(od_settings
, SMU_11_0_ODFEATURE_GFXCLK_CURVE
)) {
2022 pr_warn("GFXCLK_CURVE not supported!\n");
2026 pr_info("invalid number of parameters: %d\n", size
);
2030 pr_info("Overdrive is not initialized\n");
2036 freq_setting
= SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1
;
2037 voltage_setting
= SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1
;
2038 freq_ptr
= &od_table
->GfxclkFreq1
;
2039 voltage_ptr
= &od_table
->GfxclkVolt1
;
2042 freq_setting
= SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2
;
2043 voltage_setting
= SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2
;
2044 freq_ptr
= &od_table
->GfxclkFreq2
;
2045 voltage_ptr
= &od_table
->GfxclkVolt2
;
2048 freq_setting
= SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3
;
2049 voltage_setting
= SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3
;
2050 freq_ptr
= &od_table
->GfxclkFreq3
;
2051 voltage_ptr
= &od_table
->GfxclkVolt3
;
2054 pr_info("Invalid VDDC_CURVE index: %ld\n", input
[0]);
2055 pr_info("Supported indices: [0, 1, 2]\n");
2058 ret
= navi10_od_setting_check_range(od_settings
, freq_setting
, input
[1]);
2061 // Allow setting zero to disable the OverDrive VDDC curve
2062 if (input
[2] != 0) {
2063 ret
= navi10_od_setting_check_range(od_settings
, voltage_setting
, input
[2]);
2066 *freq_ptr
= input
[1];
2067 *voltage_ptr
= ((uint16_t)input
[2]) * NAVI10_VOLTAGE_SCALE
;
2068 pr_debug("OD: set curve %ld: (%d, %d)\n", input
[0], *freq_ptr
, *voltage_ptr
);
2070 // If setting 0, disable all voltage curve settings
2071 od_table
->GfxclkVolt1
= 0;
2072 od_table
->GfxclkVolt2
= 0;
2073 od_table
->GfxclkVolt3
= 0;
2075 navi10_dump_od_table(od_table
);
2083 static int navi10_run_btc(struct smu_context
*smu
)
2087 ret
= smu_send_smc_msg(smu
, SMU_MSG_RunBtc
);
2089 pr_err("RunBtc failed!\n");
2094 static const struct pptable_funcs navi10_ppt_funcs
= {
2095 .tables_init
= navi10_tables_init
,
2096 .alloc_dpm_context
= navi10_allocate_dpm_context
,
2097 .store_powerplay_table
= navi10_store_powerplay_table
,
2098 .check_powerplay_table
= navi10_check_powerplay_table
,
2099 .append_powerplay_table
= navi10_append_powerplay_table
,
2100 .get_smu_msg_index
= navi10_get_smu_msg_index
,
2101 .get_smu_clk_index
= navi10_get_smu_clk_index
,
2102 .get_smu_feature_index
= navi10_get_smu_feature_index
,
2103 .get_smu_table_index
= navi10_get_smu_table_index
,
2104 .get_smu_power_index
= navi10_get_pwr_src_index
,
2105 .get_workload_type
= navi10_get_workload_type
,
2106 .get_allowed_feature_mask
= navi10_get_allowed_feature_mask
,
2107 .set_default_dpm_table
= navi10_set_default_dpm_table
,
2108 .dpm_set_uvd_enable
= navi10_dpm_set_uvd_enable
,
2109 .dpm_set_jpeg_enable
= navi10_dpm_set_jpeg_enable
,
2110 .get_current_clk_freq_by_table
= navi10_get_current_clk_freq_by_table
,
2111 .print_clk_levels
= navi10_print_clk_levels
,
2112 .force_clk_levels
= navi10_force_clk_levels
,
2113 .populate_umd_state_clk
= navi10_populate_umd_state_clk
,
2114 .get_clock_by_type_with_latency
= navi10_get_clock_by_type_with_latency
,
2115 .pre_display_config_changed
= navi10_pre_display_config_changed
,
2116 .display_config_changed
= navi10_display_config_changed
,
2117 .notify_smc_display_config
= navi10_notify_smc_display_config
,
2118 .force_dpm_limit_value
= navi10_force_dpm_limit_value
,
2119 .unforce_dpm_levels
= navi10_unforce_dpm_levels
,
2120 .is_dpm_running
= navi10_is_dpm_running
,
2121 .get_fan_speed_percent
= navi10_get_fan_speed_percent
,
2122 .get_fan_speed_rpm
= navi10_get_fan_speed_rpm
,
2123 .get_power_profile_mode
= navi10_get_power_profile_mode
,
2124 .set_power_profile_mode
= navi10_set_power_profile_mode
,
2125 .get_profiling_clk_mask
= navi10_get_profiling_clk_mask
,
2126 .set_watermarks_table
= navi10_set_watermarks_table
,
2127 .read_sensor
= navi10_read_sensor
,
2128 .get_uclk_dpm_states
= navi10_get_uclk_dpm_states
,
2129 .set_performance_level
= navi10_set_performance_level
,
2130 .get_thermal_temperature_range
= navi10_get_thermal_temperature_range
,
2131 .display_disable_memory_clock_switch
= navi10_display_disable_memory_clock_switch
,
2132 .get_power_limit
= navi10_get_power_limit
,
2133 .update_pcie_parameters
= navi10_update_pcie_parameters
,
2134 .init_microcode
= smu_v11_0_init_microcode
,
2135 .load_microcode
= smu_v11_0_load_microcode
,
2136 .init_smc_tables
= smu_v11_0_init_smc_tables
,
2137 .fini_smc_tables
= smu_v11_0_fini_smc_tables
,
2138 .init_power
= smu_v11_0_init_power
,
2139 .fini_power
= smu_v11_0_fini_power
,
2140 .check_fw_status
= smu_v11_0_check_fw_status
,
2141 .setup_pptable
= smu_v11_0_setup_pptable
,
2142 .get_vbios_bootup_values
= smu_v11_0_get_vbios_bootup_values
,
2143 .get_clk_info_from_vbios
= smu_v11_0_get_clk_info_from_vbios
,
2144 .check_pptable
= smu_v11_0_check_pptable
,
2145 .parse_pptable
= smu_v11_0_parse_pptable
,
2146 .populate_smc_tables
= smu_v11_0_populate_smc_pptable
,
2147 .check_fw_version
= smu_v11_0_check_fw_version
,
2148 .write_pptable
= smu_v11_0_write_pptable
,
2149 .set_min_dcef_deep_sleep
= smu_v11_0_set_min_dcef_deep_sleep
,
2150 .set_driver_table_location
= smu_v11_0_set_driver_table_location
,
2151 .set_tool_table_location
= smu_v11_0_set_tool_table_location
,
2152 .notify_memory_pool_location
= smu_v11_0_notify_memory_pool_location
,
2153 .system_features_control
= smu_v11_0_system_features_control
,
2154 .send_smc_msg_with_param
= smu_v11_0_send_msg_with_param
,
2155 .read_smc_arg
= smu_v11_0_read_arg
,
2156 .init_display_count
= smu_v11_0_init_display_count
,
2157 .set_allowed_mask
= smu_v11_0_set_allowed_mask
,
2158 .get_enabled_mask
= smu_v11_0_get_enabled_mask
,
2159 .notify_display_change
= smu_v11_0_notify_display_change
,
2160 .set_power_limit
= smu_v11_0_set_power_limit
,
2161 .get_current_clk_freq
= smu_v11_0_get_current_clk_freq
,
2162 .init_max_sustainable_clocks
= smu_v11_0_init_max_sustainable_clocks
,
2163 .start_thermal_control
= smu_v11_0_start_thermal_control
,
2164 .stop_thermal_control
= smu_v11_0_stop_thermal_control
,
2165 .set_deep_sleep_dcefclk
= smu_v11_0_set_deep_sleep_dcefclk
,
2166 .display_clock_voltage_request
= smu_v11_0_display_clock_voltage_request
,
2167 .get_fan_control_mode
= smu_v11_0_get_fan_control_mode
,
2168 .set_fan_control_mode
= smu_v11_0_set_fan_control_mode
,
2169 .set_fan_speed_percent
= smu_v11_0_set_fan_speed_percent
,
2170 .set_fan_speed_rpm
= smu_v11_0_set_fan_speed_rpm
,
2171 .set_xgmi_pstate
= smu_v11_0_set_xgmi_pstate
,
2172 .gfx_off_control
= smu_v11_0_gfx_off_control
,
2173 .register_irq_handler
= smu_v11_0_register_irq_handler
,
2174 .set_azalia_d3_pme
= smu_v11_0_set_azalia_d3_pme
,
2175 .get_max_sustainable_clocks_by_dc
= smu_v11_0_get_max_sustainable_clocks_by_dc
,
2176 .baco_is_support
= smu_v11_0_baco_is_support
,
2177 .baco_get_state
= smu_v11_0_baco_get_state
,
2178 .baco_set_state
= smu_v11_0_baco_set_state
,
2179 .baco_enter
= smu_v11_0_baco_enter
,
2180 .baco_exit
= smu_v11_0_baco_exit
,
2181 .get_dpm_ultimate_freq
= smu_v11_0_get_dpm_ultimate_freq
,
2182 .set_soft_freq_limited_range
= smu_v11_0_set_soft_freq_limited_range
,
2183 .override_pcie_parameters
= smu_v11_0_override_pcie_parameters
,
2184 .set_default_od_settings
= navi10_set_default_od_settings
,
2185 .od_edit_dpm_table
= navi10_od_edit_dpm_table
,
2186 .get_pptable_power_limit
= navi10_get_pptable_power_limit
,
2187 .run_btc
= navi10_run_btc
,
2190 void navi10_set_ppt_funcs(struct smu_context
*smu
)
2192 smu
->ppt_funcs
= &navi10_ppt_funcs
;