/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_navi10.h"
#include "soc15_common.h"

#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"

#include "asic_reg/mp/mp_11_0_sh_mask.h"
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)     | \
	FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT)   | \
	FEATURE_MASK(FEATURE_DPM_UCLK_BIT)       | \
	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)     | \
	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)     | \
	FEATURE_MASK(FEATURE_DPM_LINK_BIT)       | \
	FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
#define MSG_MAP(msg, index) \
	[SMU_MSG_##msg] = {1, (index)}
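/*
 * The *_MAP() macros below build sparse "cmn2aisc" lookup tables that
 * translate the driver's common SMU enums into this ASIC's firmware indices.
 * As a rough sketch of what the preprocessor produces (assuming the
 * smu_11_0_cmn2aisc_mapping layout of a valid_mapping flag followed by a
 * map_to index), an entry such as
 *
 *	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage)
 *
 * expands to
 *
 *	[SMU_MSG_TestMessage] = {1, (PPSMC_MSG_TestMessage)}
 *
 * so unset slots keep valid_mapping == 0 and the lookup helpers further down
 * can reject requests this ASIC does not implement.
 */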
static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh),
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetEnabledSmuFeaturesLow),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetEnabledSmuFeaturesHigh),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable),
	MSG_MAP(UseBackupPPTable,		PPSMC_MSG_UseBackupPPTable),
	MSG_MAP(RunBtc,				PPSMC_MSG_RunBtc),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex),
	MSG_MAP(SetMemoryChannelConfig,		PPSMC_MSG_SetMemoryChannelConfig),
	MSG_MAP(SetGeminiMode,			PPSMC_MSG_SetGeminiMode),
	MSG_MAP(SetGeminiApertureHigh,		PPSMC_MSG_SetGeminiApertureHigh),
	MSG_MAP(SetGeminiApertureLow,		PPSMC_MSG_SetGeminiApertureLow),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters),
	MSG_MAP(SetMinDeepSleepDcefclk,		PPSMC_MSG_SetMinDeepSleepDcefclk),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource),
	MSG_MAP(SetUclkFastSwitch,		PPSMC_MSG_SetUclkFastSwitch),
	MSG_MAP(SetVideoFps,			PPSMC_MSG_SetVideoFps),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize),
	MSG_MAP(ConfigureGfxDidt,		PPSMC_MSG_ConfigureGfxDidt),
	MSG_MAP(NumOfDisplays,			PPSMC_MSG_NumOfDisplays),
	MSG_MAP(SetSystemVirtualDramAddrHigh,	PPSMC_MSG_SetSystemVirtualDramAddrHigh),
	MSG_MAP(SetSystemVirtualDramAddrLow,	PPSMC_MSG_SetSystemVirtualDramAddrLow),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq),
	MSG_MAP(GetDebugData,			PPSMC_MSG_GetDebugData),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco),
	MSG_MAP(PrepareMp1ForReset,		PPSMC_MSG_PrepareMp1ForReset),
	MSG_MAP(PrepareMp1ForShutdown,		PPSMC_MSG_PrepareMp1ForShutdown),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg),
	MSG_MAP(BacoAudioD3PME,			PPSMC_MSG_BacoAudioD3PME),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3),
};
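/*
 * Several logical clocks alias the same PPCLK domain on Navi10: SCLK shares
 * the GFXCLK domain, MCLK shares UCLK (memory), and FCLK is folded onto
 * SOCCLK in this table, so requests under either name reach the same DPM
 * table in the firmware.
 */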
static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
	CLK_MAP(SCLK,	PPCLK_GFXCLK),
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK,	PPCLK_SOCCLK),
	CLK_MAP(UCLK,	PPCLK_UCLK),
	CLK_MAP(MCLK,	PPCLK_UCLK),
	CLK_MAP(DCLK,	PPCLK_DCLK),
	CLK_MAP(VCLK,	PPCLK_VCLK),
	CLK_MAP(DCEFCLK, PPCLK_DCEFCLK),
	CLK_MAP(DISPCLK, PPCLK_DISPCLK),
	CLK_MAP(PIXCLK, PPCLK_PIXCLK),
	CLK_MAP(PHYCLK, PPCLK_PHYCLK),
};
static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(DPM_PREFETCHER),
	FEA_MAP(DPM_GFX_PACE),
	FEA_MAP(DPM_DCEFCLK),
	FEA_MAP(MEM_VDDCI_SCALING),
	FEA_MAP(MEM_MVDD_SCALING),
	FEA_MAP(RSMU_SMN_CG),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(TEMP_DEPENDENT_VMIN),
};
static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(AVFS_FUSE_OVERRIDE),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	TAB_MAP(I2C_COMMANDS),
};
static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};
static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_CUSTOM_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
};
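/*
 * Note that both the COMPUTE and the CUSTOM power profiles are routed to
 * WORKLOAD_PPLIB_CUSTOM_BIT in this table, so selecting the compute profile
 * programs the SMU's custom workload slot rather than a dedicated compute
 * one.
 */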
static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
{
	struct smu_11_0_cmn2aisc_mapping mapping;

	if (index >= SMU_MSG_MAX_COUNT)
		return -EINVAL;

	mapping = navi10_message_map[index];
	if (!(mapping.valid_mapping)) {
		return -EINVAL;
	}

	return mapping.map_to;
}
static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
{
	struct smu_11_0_cmn2aisc_mapping mapping;

	if (index >= SMU_CLK_COUNT)
		return -EINVAL;

	mapping = navi10_clk_map[index];
	if (!(mapping.valid_mapping)) {
		return -EINVAL;
	}

	return mapping.map_to;
}
static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
{
	struct smu_11_0_cmn2aisc_mapping mapping;

	if (index >= SMU_FEATURE_COUNT)
		return -EINVAL;

	mapping = navi10_feature_mask_map[index];
	if (!(mapping.valid_mapping)) {
		return -EINVAL;
	}

	return mapping.map_to;
}
static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
{
	struct smu_11_0_cmn2aisc_mapping mapping;

	if (index >= SMU_TABLE_COUNT)
		return -EINVAL;

	mapping = navi10_table_map[index];
	if (!(mapping.valid_mapping)) {
		return -EINVAL;
	}

	return mapping.map_to;
}
static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
{
	struct smu_11_0_cmn2aisc_mapping mapping;

	if (index >= SMU_POWER_SOURCE_COUNT)
		return -EINVAL;

	mapping = navi10_pwr_src_map[index];
	if (!(mapping.valid_mapping)) {
		return -EINVAL;
	}

	return mapping.map_to;
}
static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
{
	struct smu_11_0_cmn2aisc_mapping mapping;

	if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
		return -EINVAL;

	mapping = navi10_workload_map[profile];
	if (!(mapping.valid_mapping)) {
		return -EINVAL;
	}

	return mapping.map_to;
}
static bool is_asic_secure(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	bool is_secure = true;
	uint32_t mp0_fw_intf;

	mp0_fw_intf = RREG32_PCIE(MP0_Public |
				  (smnMP0_FW_INTF & 0xffffffff));

	if (!(mp0_fw_intf & (1 << 19)))
		is_secure = false;

	return is_secure;
}
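/*
 * MP0_FW_INTF is read through the PCIe index/data pair (RREG32_PCIE); bit 19
 * appears to indicate a non-secure fused part, so a cleared bit makes
 * is_asic_secure() report false.  The result gates the UCLK DPM / DS_SOCCLK
 * workaround for secure Navi10 A0 boards in
 * navi10_get_allowed_feature_mask() below.
 */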
static int
navi10_get_allowed_feature_mask(struct smu_context *smu,
				uint32_t *feature_mask, uint32_t num)
{
	struct amdgpu_device *adev = smu->adev;

	memset(feature_mask, 0, sizeof(uint32_t) * num);

	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
				| FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)
				| FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)
				| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
				| FEATURE_MASK(FEATURE_DPM_LINK_BIT)
				| FEATURE_MASK(FEATURE_GFX_ULV_BIT)
				| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
				| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
				| FEATURE_MASK(FEATURE_PPT_BIT)
				| FEATURE_MASK(FEATURE_TDC_BIT)
				| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
				| FEATURE_MASK(FEATURE_VR0HOT_BIT)
				| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
				| FEATURE_MASK(FEATURE_THERMAL_BIT)
				| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
				| FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)
				| FEATURE_MASK(FEATURE_DS_GFXCLK_BIT)
				| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
				| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
				| FEATURE_MASK(FEATURE_BACO_BIT)
				| FEATURE_MASK(FEATURE_ACDC_BIT)
				| FEATURE_MASK(FEATURE_GFX_SS_BIT)
				| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
				| FEATURE_MASK(FEATURE_FW_CTF_BIT);

	if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
				| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
				| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);

	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
		/* TODO: remove it once fw fix the bug */
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
	}

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);

	/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
	if (is_asic_secure(smu)) {
		/* only for navi10 A0 */
		if ((adev->asic_type == CHIP_NAVI10) &&
		    (adev->rev_id == 0)) {
			*(uint64_t *)feature_mask &=
				~(FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
				  | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
				  | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT));
			*(uint64_t *)feature_mask &=
				~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
		}
	}

	return 0;
}
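/*
 * feature_mask points at two consecutive uint32_t words; the casts above
 * treat them as one 64-bit mask so FEATURE_MASK() bits above 31 land in
 * feature_mask[1] on the little-endian hosts this driver targets.  A hedged
 * sketch of how a caller would split the mask again (illustrative only, not
 * the exact common-code path):
 *
 *	uint32_t mask[2] = {0};
 *
 *	navi10_get_allowed_feature_mask(smu, mask, 2);
 *	smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow, mask[0]);
 *	smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, mask[1]);
 */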
static int navi10_check_powerplay_table(struct smu_context *smu)
{
	return 0;
}
static int navi10_append_powerplay_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smc_dpm_info);

	ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
				      (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
	       sizeof(I2cControllerConfig_t) * NUM_I2C_CONTROLLERS);

	/* SVI2 Board Parameters */
	smc_pptable->MaxVoltageStepGfx = smc_dpm_table->MaxVoltageStepGfx;
	smc_pptable->MaxVoltageStepSoc = smc_dpm_table->MaxVoltageStepSoc;
	smc_pptable->VddGfxVrMapping = smc_dpm_table->VddGfxVrMapping;
	smc_pptable->VddSocVrMapping = smc_dpm_table->VddSocVrMapping;
	smc_pptable->VddMem0VrMapping = smc_dpm_table->VddMem0VrMapping;
	smc_pptable->VddMem1VrMapping = smc_dpm_table->VddMem1VrMapping;
	smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->GfxUlvPhaseSheddingMask;
	smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->SocUlvPhaseSheddingMask;
	smc_pptable->ExternalSensorPresent = smc_dpm_table->ExternalSensorPresent;
	smc_pptable->Padding8_V = smc_dpm_table->Padding8_V;

	/* Telemetry Settings */
	smc_pptable->GfxMaxCurrent = smc_dpm_table->GfxMaxCurrent;
	smc_pptable->GfxOffset = smc_dpm_table->GfxOffset;
	smc_pptable->Padding_TelemetryGfx = smc_dpm_table->Padding_TelemetryGfx;
	smc_pptable->SocMaxCurrent = smc_dpm_table->SocMaxCurrent;
	smc_pptable->SocOffset = smc_dpm_table->SocOffset;
	smc_pptable->Padding_TelemetrySoc = smc_dpm_table->Padding_TelemetrySoc;
	smc_pptable->Mem0MaxCurrent = smc_dpm_table->Mem0MaxCurrent;
	smc_pptable->Mem0Offset = smc_dpm_table->Mem0Offset;
	smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->Padding_TelemetryMem0;
	smc_pptable->Mem1MaxCurrent = smc_dpm_table->Mem1MaxCurrent;
	smc_pptable->Mem1Offset = smc_dpm_table->Mem1Offset;
	smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->Padding_TelemetryMem1;

	smc_pptable->AcDcGpio = smc_dpm_table->AcDcGpio;
	smc_pptable->AcDcPolarity = smc_dpm_table->AcDcPolarity;
	smc_pptable->VR0HotGpio = smc_dpm_table->VR0HotGpio;
	smc_pptable->VR0HotPolarity = smc_dpm_table->VR0HotPolarity;
	smc_pptable->VR1HotGpio = smc_dpm_table->VR1HotGpio;
	smc_pptable->VR1HotPolarity = smc_dpm_table->VR1HotPolarity;
	smc_pptable->GthrGpio = smc_dpm_table->GthrGpio;
	smc_pptable->GthrPolarity = smc_dpm_table->GthrPolarity;

	/* LED Display Settings */
	smc_pptable->LedPin0 = smc_dpm_table->LedPin0;
	smc_pptable->LedPin1 = smc_dpm_table->LedPin1;
	smc_pptable->LedPin2 = smc_dpm_table->LedPin2;
	smc_pptable->padding8_4 = smc_dpm_table->padding8_4;

	/* GFXCLK PLL Spread Spectrum */
	smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->PllGfxclkSpreadEnabled;
	smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->PllGfxclkSpreadPercent;
	smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->PllGfxclkSpreadFreq;

	/* GFXCLK DFLL Spread Spectrum */
	smc_pptable->DfllGfxclkSpreadEnabled = smc_dpm_table->DfllGfxclkSpreadEnabled;
	smc_pptable->DfllGfxclkSpreadPercent = smc_dpm_table->DfllGfxclkSpreadPercent;
	smc_pptable->DfllGfxclkSpreadFreq = smc_dpm_table->DfllGfxclkSpreadFreq;

	/* UCLK Spread Spectrum */
	smc_pptable->UclkSpreadEnabled = smc_dpm_table->UclkSpreadEnabled;
	smc_pptable->UclkSpreadPercent = smc_dpm_table->UclkSpreadPercent;
	smc_pptable->UclkSpreadFreq = smc_dpm_table->UclkSpreadFreq;

	/* SOCCLK Spread Spectrum */
	smc_pptable->SoclkSpreadEnabled = smc_dpm_table->SoclkSpreadEnabled;
	smc_pptable->SocclkSpreadPercent = smc_dpm_table->SocclkSpreadPercent;
	smc_pptable->SocclkSpreadFreq = smc_dpm_table->SocclkSpreadFreq;

	/* Total board power */
	smc_pptable->TotalBoardPower = smc_dpm_table->TotalBoardPower;
	smc_pptable->BoardPadding = smc_dpm_table->BoardPadding;

	/* Mvdd Svi2 Div Ratio Setting */
	smc_pptable->MvddRatio = smc_dpm_table->MvddRatio;

	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
		/* TODO: remove it once SMU fw fix it */
		smc_pptable->DebugOverrides |= DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN;
	}

	return 0;
}
static int navi10_store_powerplay_table(struct smu_context *smu)
{
	struct smu_11_0_powerplay_table *powerplay_table = NULL;
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	if (!table_context->power_play_table)
		return -EINVAL;

	powerplay_table = table_context->power_play_table;

	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
	       sizeof(PPTable_t));

	table_context->thermal_controller_type = powerplay_table->thermal_controller_type;

	mutex_lock(&smu_baco->mutex);
	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
	    powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
		smu_baco->platform_support = true;
	mutex_unlock(&smu_baco->mutex);

	return 0;
}
static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		return -ENOMEM;

	smu_table->metrics_time = 0;

	return 0;
}
static int navi10_get_metrics_table(struct smu_context *smu,
				    SmuMetrics_t *metrics_table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	int ret = 0;

	if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
		ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
				       (void *)smu_table->metrics_table, false);
		if (ret) {
			pr_info("Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));

	return 0;
}
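/*
 * Metrics reads are cached: the table is re-fetched from the SMU only when
 * more than HZ / 1000 jiffies (roughly one millisecond, quantized to the
 * kernel tick) have passed since the last update, so back-to-back sensor
 * queries reuse a single table transfer.
 */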
static int navi10_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (smu_dpm->dpm_context)
		return -EINVAL;

	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
				       GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;

	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}
static int navi10_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	PPTable_t *driver_ppt = NULL;

	driver_ppt = table_context->driver_pptable;

	dpm_context->dpm_tables.soc_table.min = driver_ppt->FreqTableSocclk[0];
	dpm_context->dpm_tables.soc_table.max = driver_ppt->FreqTableSocclk[NUM_SOCCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.gfx_table.min = driver_ppt->FreqTableGfx[0];
	dpm_context->dpm_tables.gfx_table.max = driver_ppt->FreqTableGfx[NUM_GFXCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.uclk_table.min = driver_ppt->FreqTableUclk[0];
	dpm_context->dpm_tables.uclk_table.max = driver_ppt->FreqTableUclk[NUM_UCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.vclk_table.min = driver_ppt->FreqTableVclk[0];
	dpm_context->dpm_tables.vclk_table.max = driver_ppt->FreqTableVclk[NUM_VCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.dclk_table.min = driver_ppt->FreqTableDclk[0];
	dpm_context->dpm_tables.dclk_table.max = driver_ppt->FreqTableDclk[NUM_DCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.dcef_table.min = driver_ppt->FreqTableDcefclk[0];
	dpm_context->dpm_tables.dcef_table.max = driver_ppt->FreqTableDcefclk[NUM_DCEFCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.pixel_table.min = driver_ppt->FreqTablePixclk[0];
	dpm_context->dpm_tables.pixel_table.max = driver_ppt->FreqTablePixclk[NUM_PIXCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.display_table.min = driver_ppt->FreqTableDispclk[0];
	dpm_context->dpm_tables.display_table.max = driver_ppt->FreqTableDispclk[NUM_DISPCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
	dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];

	return 0;
}
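/*
 * The default DPM limits above are simply the first (lowest) and last
 * (highest) entries of each frequency table the SMU exported in the driver
 * pptable; no additional clamping is applied at this point.
 */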
static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (enable) {
		/* vcn dpm on is a prerequisite for vcn power gate messages */
		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
			if (ret)
				return ret;
		}
		power_gate->vcn_gated = false;
	} else {
		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
			if (ret)
				return ret;
		}
		power_gate->vcn_gated = true;
	}

	return 0;
}
static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
						enum smu_clk_type clk_type,
						uint32_t *value)
{
	int ret = 0, clk_id = 0;
	SmuMetrics_t metrics;

	ret = navi10_get_metrics_table(smu, &metrics);
	if (ret)
		return ret;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	*value = metrics.CurrClock[clk_id];

	return 0;
}
static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	DpmDescriptor_t *dpm_desc = NULL;
	uint32_t clk_index = 0;

	clk_index = smu_clk_get_index(smu, clk_type);
	dpm_desc = &pptable->DpmDescriptor[clk_index];

	/* 0 - Fine grained DPM, 1 - Discrete DPM */
	return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
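/*
 * Fine-grained DPM means the SMU accepts arbitrary frequencies between the
 * lowest and highest level instead of snapping to discrete entries.  The
 * print path below relies on this: for fine-grained clocks it reports a
 * synthetic three-entry view (min, current or midpoint, max) instead of
 * dumping a discrete table.
 */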
static int navi10_print_clk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type, char *buf)
{
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	uint32_t freq_values[3] = {0};
	uint32_t mark_index = 0;

	ret = smu_get_current_clk_freq(smu, clk_type, &cur_value);
	if (ret)
		return size;

	/* 10KHz -> MHz */
	cur_value = cur_value / 100;

	ret = smu_get_dpm_level_count(smu, clk_type, &count);
	if (ret)
		return size;

	if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
		for (i = 0; i < count; i++) {
			ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
			if (ret)
				return size;

			size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
					cur_value == value ? "*" : "");
		}
	} else {
		ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
		if (ret)
			return size;
		ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
		if (ret)
			return size;

		freq_values[1] = cur_value;
		mark_index = cur_value == freq_values[0] ? 0 :
			     cur_value == freq_values[2] ? 2 : 1;
		if (mark_index != 1)
			freq_values[1] = (freq_values[0] + freq_values[2]) / 2;

		for (i = 0; i < 3; i++) {
			size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
					i == mark_index ? "*" : "");
		}
	}

	return size;
}
static int navi10_force_clk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type, uint32_t mask)
{
	int ret = 0, size = 0;
	uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;

	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
	if (ret)
		return size;

	ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
	if (ret)
		return size;

	ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
	if (ret)
		return size;

	return size;
}
static int navi10_populate_umd_state_clk(struct smu_context *smu)
{
	int ret = 0;
	uint32_t min_sclk_freq = 0, min_mclk_freq = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL);
	if (ret)
		return ret;

	smu->pstate_sclk = min_sclk_freq * 100;

	ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL);
	if (ret)
		return ret;

	smu->pstate_mclk = min_mclk_freq * 100;

	return 0;
}
static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
						 enum smu_clk_type clk_type,
						 struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0, i = 0;
	uint32_t level_count = 0, freq = 0;

	ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
	if (ret)
		return ret;

	level_count = min(level_count, (uint32_t)MAX_NUM_CLOCKS);
	clocks->num_levels = level_count;

	for (i = 0; i < level_count; i++) {
		ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &freq);
		if (ret)
			return ret;

		clocks->data[i].clocks_in_khz = freq * 1000;
		clocks->data[i].latency_in_us = 0;
	}

	return 0;
}
static int navi10_pre_display_config_changed(struct smu_context *smu)
{
	int ret = 0;
	uint32_t max_freq = 0;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
	if (ret)
		return ret;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq);
		if (ret)
			return ret;
		ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
		if (ret)
			return ret;
	}

	return ret;
}
static int navi10_display_config_changed(struct smu_context *smu)
{
	int ret = 0;

	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);
		if (ret)
			return ret;

		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
						  smu->display_config->num_display);
		if (ret)
			return ret;
	}

	return ret;
}
static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
{
	int ret = 0, i = 0;
	uint32_t min_freq, max_freq, force_freq;
	enum smu_clk_type clk_type;

	enum smu_clk_type clks[] = {
		SMU_GFXCLK,
		SMU_MCLK,
		SMU_SOCCLK,
	};

	for (i = 0; i < ARRAY_SIZE(clks); i++) {
		clk_type = clks[i];
		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
		if (ret)
			return ret;

		force_freq = highest ? max_freq : min_freq;
		ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
		if (ret)
			return ret;
	}

	return ret;
}
static int navi10_unforce_dpm_levels(struct smu_context *smu)
{
	int ret = 0, i = 0;
	uint32_t min_freq, max_freq;
	enum smu_clk_type clk_type;

	enum smu_clk_type clks[] = {
		SMU_GFXCLK,
		SMU_MCLK,
		SMU_SOCCLK,
	};

	for (i = 0; i < ARRAY_SIZE(clks); i++) {
		clk_type = clks[i];
		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
		if (ret)
			return ret;

		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
		if (ret)
			return ret;
	}

	return ret;
}
static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
{
	int ret = 0;
	SmuMetrics_t metrics;

	ret = navi10_get_metrics_table(smu, &metrics);
	if (ret)
		return ret;

	*value = metrics.AverageSocketPower << 8;

	return 0;
}
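/*
 * AverageSocketPower is reported by the SMU in watts; the left shift by 8
 * presumably hands the upper layers an 8.8 fixed-point value that the hwmon
 * code scales back down, rather than the raw metric.
 */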
static int navi10_get_current_activity_percent(struct smu_context *smu,
					       enum amd_pp_sensors sensor,
					       uint32_t *value)
{
	int ret = 0;
	SmuMetrics_t metrics;

	ret = navi10_get_metrics_table(smu, &metrics);
	if (ret)
		return ret;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		*value = metrics.AverageGfxActivity;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		*value = metrics.AverageUclkActivity;
		break;
	default:
		pr_err("Invalid sensor for retrieving clock activity\n");
		return -EINVAL;
	}

	return 0;
}
static bool navi10_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint32_t feature_mask[2];
	unsigned long feature_enabled;

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
			  ((uint64_t)feature_mask[1] << 32));
	return !!(feature_enabled & SMC_DPM_FEATURE);
}
static int navi10_get_fan_speed_rpm(struct smu_context *smu,
				    uint32_t *speed)
{
	int ret = 0;
	SmuMetrics_t metrics;

	ret = navi10_get_metrics_table(smu, &metrics);
	if (ret)
		return ret;

	*speed = metrics.CurrFanSpeed;

	return ret;
}
static int navi10_get_fan_speed_percent(struct smu_context *smu,
					uint32_t *speed)
{
	int ret = 0;
	uint32_t percent = 0;
	uint32_t current_rpm;
	PPTable_t *pptable = smu->smu_table.driver_pptable;

	ret = navi10_get_fan_speed_rpm(smu, &current_rpm);
	if (ret)
		return ret;

	percent = current_rpm * 100 / pptable->FanMaximumRpm;
	*speed = percent > 100 ? 100 : percent;

	return ret;
}
static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	uint32_t i, size = 0;
	int16_t workload_type = 0;
	int result = 0;
	static const char *profile_name[] = {
					"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinFreqType",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};

	size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9], title[10]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_workload_get_type(smu, i);
		if (workload_type < 0)
			return -EINVAL;

		result = smu_update_table(smu,
					  SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
					  (void *)(&activity_monitor), false);
		if (result) {
			pr_err("[%s] Failed to get activity monitor!", __func__);
			return result;
		}

		size += sprintf(buf + size, "%2d %14s%s:\n",
				i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
				" ",
				0,
				"GFXCLK",
				activity_monitor.Gfx_FPS,
				activity_monitor.Gfx_MinFreqStep,
				activity_monitor.Gfx_MinActiveFreqType,
				activity_monitor.Gfx_MinActiveFreq,
				activity_monitor.Gfx_BoosterFreqType,
				activity_monitor.Gfx_BoosterFreq,
				activity_monitor.Gfx_PD_Data_limit_c,
				activity_monitor.Gfx_PD_Data_error_coeff,
				activity_monitor.Gfx_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
				" ",
				1,
				"SOCCLK",
				activity_monitor.Soc_FPS,
				activity_monitor.Soc_MinFreqStep,
				activity_monitor.Soc_MinActiveFreqType,
				activity_monitor.Soc_MinActiveFreq,
				activity_monitor.Soc_BoosterFreqType,
				activity_monitor.Soc_BoosterFreq,
				activity_monitor.Soc_PD_Data_limit_c,
				activity_monitor.Soc_PD_Data_error_coeff,
				activity_monitor.Soc_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
				" ",
				2,
				"MEMLK",
				activity_monitor.Mem_FPS,
				activity_monitor.Mem_MinFreqStep,
				activity_monitor.Mem_MinActiveFreqType,
				activity_monitor.Mem_MinActiveFreq,
				activity_monitor.Mem_BoosterFreqType,
				activity_monitor.Mem_BoosterFreq,
				activity_monitor.Mem_PD_Data_limit_c,
				activity_monitor.Mem_PD_Data_error_coeff,
				activity_monitor.Mem_PD_Data_error_rate_coeff);
	}

	return size;
}
static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	int workload_type, ret = 0;

	smu->power_profile_mode = input[size];

	if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
		pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
		return -EINVAL;
	}

	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		ret = smu_update_table(smu,
				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
				       (void *)(&activity_monitor), false);
		if (ret) {
			pr_err("[%s] Failed to get activity monitor!", __func__);
			return ret;
		}

		switch (input[0]) {
		case 0: /* Gfxclk */
			activity_monitor.Gfx_FPS = input[1];
			activity_monitor.Gfx_MinFreqStep = input[2];
			activity_monitor.Gfx_MinActiveFreqType = input[3];
			activity_monitor.Gfx_MinActiveFreq = input[4];
			activity_monitor.Gfx_BoosterFreqType = input[5];
			activity_monitor.Gfx_BoosterFreq = input[6];
			activity_monitor.Gfx_PD_Data_limit_c = input[7];
			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
			break;
		case 1: /* Socclk */
			activity_monitor.Soc_FPS = input[1];
			activity_monitor.Soc_MinFreqStep = input[2];
			activity_monitor.Soc_MinActiveFreqType = input[3];
			activity_monitor.Soc_MinActiveFreq = input[4];
			activity_monitor.Soc_BoosterFreqType = input[5];
			activity_monitor.Soc_BoosterFreq = input[6];
			activity_monitor.Soc_PD_Data_limit_c = input[7];
			activity_monitor.Soc_PD_Data_error_coeff = input[8];
			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
			break;
		case 2: /* Memlk */
			activity_monitor.Mem_FPS = input[1];
			activity_monitor.Mem_MinFreqStep = input[2];
			activity_monitor.Mem_MinActiveFreqType = input[3];
			activity_monitor.Mem_MinActiveFreq = input[4];
			activity_monitor.Mem_BoosterFreqType = input[5];
			activity_monitor.Mem_BoosterFreq = input[6];
			activity_monitor.Mem_PD_Data_limit_c = input[7];
			activity_monitor.Mem_PD_Data_error_coeff = input[8];
			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
			break;
		default:
			return -EINVAL;
		}

		ret = smu_update_table(smu,
				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
				       (void *)(&activity_monitor), true);
		if (ret) {
			pr_err("[%s] Failed to set activity monitor!", __func__);
			return ret;
		}
	}

	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
	if (workload_type < 0)
		return -EINVAL;
	smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
				    1 << workload_type);

	return ret;
}
*smu
,
1207 enum amd_dpm_forced_level level
,
1208 uint32_t *sclk_mask
,
1209 uint32_t *mclk_mask
,
1213 uint32_t level_count
= 0;
1215 if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
) {
1218 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
) {
1221 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
1223 ret
= smu_get_dpm_level_count(smu
, SMU_SCLK
, &level_count
);
1226 *sclk_mask
= level_count
- 1;
1230 ret
= smu_get_dpm_level_count(smu
, SMU_MCLK
, &level_count
);
1233 *mclk_mask
= level_count
- 1;
1237 ret
= smu_get_dpm_level_count(smu
, SMU_SOCCLK
, &level_count
);
1240 *soc_mask
= level_count
- 1;
static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
{
	struct smu_clocks min_clocks = {0};
	struct pp_display_clock_request clock_req;
	int ret = 0;

	min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
	min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
	min_clocks.memory_clock = smu->display_config->min_mem_set_clock;

	if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		clock_req.clock_type = amd_pp_dcef_clock;
		clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
		if (!smu_display_clock_voltage_request(smu, &clock_req)) {
			if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
				ret = smu_send_smc_msg_with_param(smu,
								  SMU_MSG_SetMinDeepSleepDcefclk,
								  min_clocks.dcef_clock_in_sr/100);
				if (ret) {
					pr_err("Attempt to set divider for DCEFCLK Failed!");
					return ret;
				}
			}
		} else {
			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
		if (ret) {
			pr_err("[%s] Set hard min uclk failed!", __func__);
			return ret;
		}
	}

	return 0;
}
static int navi10_set_watermarks_table(struct smu_context *smu,
				       void *watermarks,
				       struct dm_pp_wm_sets_with_clock_ranges_soc15
				       *clock_ranges)
{
	int i;
	Watermarks_t *table = watermarks;

	if (!table || !clock_ranges)
		return -EINVAL;

	if (clock_ranges->num_wm_dmif_sets > 4 ||
	    clock_ranges->num_wm_mcif_sets > 4)
		return -EINVAL;

	for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
		table->WatermarkRow[1][i].MinClock =
			cpu_to_le16((uint16_t)
			(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxClock =
			cpu_to_le16((uint16_t)
			(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MinUclk =
			cpu_to_le16((uint16_t)
			(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].WmSetting = (uint8_t)
			clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
	}

	for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
		table->WatermarkRow[0][i].MinClock =
			cpu_to_le16((uint16_t)
			(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxClock =
			cpu_to_le16((uint16_t)
			(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MinUclk =
			cpu_to_le16((uint16_t)
			(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].WmSetting = (uint8_t)
			clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
	}

	return 0;
}
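/*
 * WatermarkRow[1] holds the DMIF (DCFCLK-based) ranges and WatermarkRow[0]
 * the MCIF (SOCCLK-based) ranges; the incoming DC values are in kHz and are
 * scaled down to MHz before being packed as little-endian 16-bit fields.
 */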
static int navi10_thermal_get_temperature(struct smu_context *smu,
					  enum amd_pp_sensors sensor,
					  uint32_t *value)
{
	int ret = 0;
	SmuMetrics_t metrics;

	ret = navi10_get_metrics_table(smu, &metrics);
	if (ret)
		return ret;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		*value = metrics.TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		*value = metrics.TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		*value = metrics.TemperatureMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	default:
		pr_err("Invalid sensor for retrieving temp\n");
		return -EINVAL;
	}

	return 0;
}
static int navi10_read_sensor(struct smu_context *smu,
			      enum amd_pp_sensors sensor,
			      void *data, uint32_t *size)
{
	int ret = 0;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*(uint32_t *)data = pptable->FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = navi10_get_current_activity_percent(smu, sensor, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = navi10_get_gpu_power(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = navi10_thermal_get_temperature(smu, sensor, (uint32_t *)data);
		*size = 4;
		break;
	default:
		break;
	}

	return ret;
}
static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states)
{
	uint32_t num_discrete_levels = 0;
	uint16_t *dpm_levels = NULL;
	uint16_t i = 0;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *driver_ppt = NULL;

	if (!clocks_in_khz || !num_states || !table_context->driver_pptable)
		return -EINVAL;

	driver_ppt = table_context->driver_pptable;
	num_discrete_levels = driver_ppt->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels;
	dpm_levels = driver_ppt->FreqTableUclk;

	if (num_discrete_levels == 0 || dpm_levels == NULL)
		return -EINVAL;

	*num_states = num_discrete_levels;
	for (i = 0; i < num_discrete_levels; i++) {
		/* convert to khz */
		*clocks_in_khz = (*dpm_levels) * 1000;
		clocks_in_khz++;
		dpm_levels++;
	}

	return 0;
}
static int navi10_set_peak_clock_by_device(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	uint32_t sclk_freq = 0, uclk_freq = 0;
	uint32_t uclk_level = 0;

	switch (adev->pdev->revision) {
	case 0xf0: /* XTX */
	case 0xc0:
		sclk_freq = NAVI10_PEAK_SCLK_XTX;
		break;
	case 0xf1: /* XT */
	case 0xc1:
		sclk_freq = NAVI10_PEAK_SCLK_XT;
		break;
	default: /* XL */
		sclk_freq = NAVI10_PEAK_SCLK_XL;
		break;
	}

	ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
	if (ret)
		return ret;
	ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq);
	if (ret)
		return ret;

	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
	if (ret)
		return ret;
	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
	if (ret)
		return ret;

	return ret;
}
static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = navi10_set_peak_clock_by_device(smu);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int navi10_get_thermal_temperature_range(struct smu_context *smu,
						struct smu_temperature_range *range)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;

	if (!range || !powerplay_table)
		return -EINVAL;

	/* The unit is temperature */
	range->min = 0;
	range->max = powerplay_table->software_shutdown_temp;

	return 0;
}
static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
						       bool disable_memory_clock_switch)
{
	int ret = 0;
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
		(struct smu_11_0_max_sustainable_clocks *)
			smu->smu_table.max_sustainable_clocks;
	uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
	uint32_t max_memory_clock = max_sustainable_clocks->uclock;

	if (smu->disable_uclk_switch == disable_memory_clock_switch)
		return 0;

	if (disable_memory_clock_switch)
		ret = smu_set_hard_freq_range(smu, SMU_UCLK, max_memory_clock, 0);
	else
		ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_memory_clock, 0);

	if (!ret)
		smu->disable_uclk_switch = disable_memory_clock_switch;

	return ret;
}
static int navi10_get_power_limit(struct smu_context *smu,
				  uint32_t *limit,
				  bool asic_default)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	uint32_t asic_default_power_limit = 0;
	int ret = 0;
	int power_src;

	if (!smu->default_power_limit ||
	    !smu->power_limit) {
		if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
			power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
			if (power_src < 0)
				return -EINVAL;

			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
							  power_src << 16);
			if (ret) {
				pr_err("[%s] get PPT limit failed!", __func__);
				return ret;
			}
			smu_read_smc_arg(smu, &asic_default_power_limit);
		} else {
			/* the last hope to figure out the ppt limit */
			if (!pptable) {
				pr_err("Cannot get PPT limit due to pptable missing!");
				return -EINVAL;
			}
			asic_default_power_limit =
				pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
		}

		if (smu->od_enabled) {
			asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
			asic_default_power_limit /= 100;
		}

		smu->default_power_limit = asic_default_power_limit;
		smu->power_limit = asic_default_power_limit;
	}

	if (asic_default)
		*limit = smu->default_power_limit;
	else
		*limit = smu->power_limit;

	return 0;
}
static const struct pptable_funcs navi10_ppt_funcs = {
	.tables_init = navi10_tables_init,
	.alloc_dpm_context = navi10_allocate_dpm_context,
	.store_powerplay_table = navi10_store_powerplay_table,
	.check_powerplay_table = navi10_check_powerplay_table,
	.append_powerplay_table = navi10_append_powerplay_table,
	.get_smu_msg_index = navi10_get_smu_msg_index,
	.get_smu_clk_index = navi10_get_smu_clk_index,
	.get_smu_feature_index = navi10_get_smu_feature_index,
	.get_smu_table_index = navi10_get_smu_table_index,
	.get_smu_power_index = navi10_get_pwr_src_index,
	.get_workload_type = navi10_get_workload_type,
	.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
	.set_default_dpm_table = navi10_set_default_dpm_table,
	.dpm_set_uvd_enable = navi10_dpm_set_uvd_enable,
	.get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table,
	.print_clk_levels = navi10_print_clk_levels,
	.force_clk_levels = navi10_force_clk_levels,
	.populate_umd_state_clk = navi10_populate_umd_state_clk,
	.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
	.pre_display_config_changed = navi10_pre_display_config_changed,
	.display_config_changed = navi10_display_config_changed,
	.notify_smc_dispaly_config = navi10_notify_smc_dispaly_config,
	.force_dpm_limit_value = navi10_force_dpm_limit_value,
	.unforce_dpm_levels = navi10_unforce_dpm_levels,
	.is_dpm_running = navi10_is_dpm_running,
	.get_fan_speed_percent = navi10_get_fan_speed_percent,
	.get_fan_speed_rpm = navi10_get_fan_speed_rpm,
	.get_power_profile_mode = navi10_get_power_profile_mode,
	.set_power_profile_mode = navi10_set_power_profile_mode,
	.get_profiling_clk_mask = navi10_get_profiling_clk_mask,
	.set_watermarks_table = navi10_set_watermarks_table,
	.read_sensor = navi10_read_sensor,
	.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
	.set_performance_level = navi10_set_performance_level,
	.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
	.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
	.get_power_limit = navi10_get_power_limit,
};
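/*
 * navi10_set_ppt_funcs() below is the file's only exported entry point: the
 * common SMU code calls it during early init to hook this ASIC's
 * pptable_funcs implementation, driver-interface version and table count
 * into struct smu_context.
 */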
void navi10_set_ppt_funcs(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	smu->ppt_funcs = &navi10_ppt_funcs;
	smu->smc_if_version = SMU11_DRIVER_IF_VERSION;
	smu_table->table_count = TABLE_COUNT;
}