2 * Copyright 2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "amdgpu_smu.h"
26 #include "smu_internal.h"
27 #include "soc15_common.h"
28 #include "smu_v12_0_ppsmc.h"
29 #include "smu12_driver_if.h"
30 #include "smu_v12_0.h"
31 #include "renoir_ppt.h"
/*
 * Mapping helpers: translate common SMU enum identifiers into the
 * Renoir (SMU12) ASIC-specific message/clock/table indices.  The first
 * struct member flags whether the mapping is valid.
 */
#define CLK_MAP(clk, index) \
	[SMU_##clk] = {1, (index)}

#define MSG_MAP(msg, index) \
	[SMU_MSG_##msg] = {1, (index)}

#define TAB_MAP_VALID(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

#define TAB_MAP_INVALID(tab) \
	[SMU_TABLE_##tab] = {0, TABLE_##tab}
46 static struct smu_12_0_cmn2aisc_mapping renoir_message_map
[SMU_MSG_MAX_COUNT
] = {
47 MSG_MAP(TestMessage
, PPSMC_MSG_TestMessage
),
48 MSG_MAP(GetSmuVersion
, PPSMC_MSG_GetSmuVersion
),
49 MSG_MAP(GetDriverIfVersion
, PPSMC_MSG_GetDriverIfVersion
),
50 MSG_MAP(PowerUpGfx
, PPSMC_MSG_PowerUpGfx
),
51 MSG_MAP(AllowGfxOff
, PPSMC_MSG_EnableGfxOff
),
52 MSG_MAP(DisallowGfxOff
, PPSMC_MSG_DisableGfxOff
),
53 MSG_MAP(PowerDownIspByTile
, PPSMC_MSG_PowerDownIspByTile
),
54 MSG_MAP(PowerUpIspByTile
, PPSMC_MSG_PowerUpIspByTile
),
55 MSG_MAP(PowerDownVcn
, PPSMC_MSG_PowerDownVcn
),
56 MSG_MAP(PowerUpVcn
, PPSMC_MSG_PowerUpVcn
),
57 MSG_MAP(PowerDownSdma
, PPSMC_MSG_PowerDownSdma
),
58 MSG_MAP(PowerUpSdma
, PPSMC_MSG_PowerUpSdma
),
59 MSG_MAP(SetHardMinIspclkByFreq
, PPSMC_MSG_SetHardMinIspclkByFreq
),
60 MSG_MAP(SetHardMinVcn
, PPSMC_MSG_SetHardMinVcn
),
61 MSG_MAP(Spare1
, PPSMC_MSG_spare1
),
62 MSG_MAP(Spare2
, PPSMC_MSG_spare2
),
63 MSG_MAP(SetAllowFclkSwitch
, PPSMC_MSG_SetAllowFclkSwitch
),
64 MSG_MAP(SetMinVideoGfxclkFreq
, PPSMC_MSG_SetMinVideoGfxclkFreq
),
65 MSG_MAP(ActiveProcessNotify
, PPSMC_MSG_ActiveProcessNotify
),
66 MSG_MAP(SetCustomPolicy
, PPSMC_MSG_SetCustomPolicy
),
67 MSG_MAP(SetVideoFps
, PPSMC_MSG_SetVideoFps
),
68 MSG_MAP(NumOfDisplays
, PPSMC_MSG_SetDisplayCount
),
69 MSG_MAP(QueryPowerLimit
, PPSMC_MSG_QueryPowerLimit
),
70 MSG_MAP(SetDriverDramAddrHigh
, PPSMC_MSG_SetDriverDramAddrHigh
),
71 MSG_MAP(SetDriverDramAddrLow
, PPSMC_MSG_SetDriverDramAddrLow
),
72 MSG_MAP(TransferTableSmu2Dram
, PPSMC_MSG_TransferTableSmu2Dram
),
73 MSG_MAP(TransferTableDram2Smu
, PPSMC_MSG_TransferTableDram2Smu
),
74 MSG_MAP(GfxDeviceDriverReset
, PPSMC_MSG_GfxDeviceDriverReset
),
75 MSG_MAP(SetGfxclkOverdriveByFreqVid
, PPSMC_MSG_SetGfxclkOverdriveByFreqVid
),
76 MSG_MAP(SetHardMinDcfclkByFreq
, PPSMC_MSG_SetHardMinDcfclkByFreq
),
77 MSG_MAP(SetHardMinSocclkByFreq
, PPSMC_MSG_SetHardMinSocclkByFreq
),
78 MSG_MAP(ControlIgpuATS
, PPSMC_MSG_ControlIgpuATS
),
79 MSG_MAP(SetMinVideoFclkFreq
, PPSMC_MSG_SetMinVideoFclkFreq
),
80 MSG_MAP(SetMinDeepSleepDcfclk
, PPSMC_MSG_SetMinDeepSleepDcfclk
),
81 MSG_MAP(ForcePowerDownGfx
, PPSMC_MSG_ForcePowerDownGfx
),
82 MSG_MAP(SetPhyclkVoltageByFreq
, PPSMC_MSG_SetPhyclkVoltageByFreq
),
83 MSG_MAP(SetDppclkVoltageByFreq
, PPSMC_MSG_SetDppclkVoltageByFreq
),
84 MSG_MAP(SetSoftMinVcn
, PPSMC_MSG_SetSoftMinVcn
),
85 MSG_MAP(EnablePostCode
, PPSMC_MSG_EnablePostCode
),
86 MSG_MAP(GetGfxclkFrequency
, PPSMC_MSG_GetGfxclkFrequency
),
87 MSG_MAP(GetFclkFrequency
, PPSMC_MSG_GetFclkFrequency
),
88 MSG_MAP(GetMinGfxclkFrequency
, PPSMC_MSG_GetMinGfxclkFrequency
),
89 MSG_MAP(GetMaxGfxclkFrequency
, PPSMC_MSG_GetMaxGfxclkFrequency
),
90 MSG_MAP(SoftReset
, PPSMC_MSG_SoftReset
),
91 MSG_MAP(SetGfxCGPG
, PPSMC_MSG_SetGfxCGPG
),
92 MSG_MAP(SetSoftMaxGfxClk
, PPSMC_MSG_SetSoftMaxGfxClk
),
93 MSG_MAP(SetHardMinGfxClk
, PPSMC_MSG_SetHardMinGfxClk
),
94 MSG_MAP(SetSoftMaxSocclkByFreq
, PPSMC_MSG_SetSoftMaxSocclkByFreq
),
95 MSG_MAP(SetSoftMaxFclkByFreq
, PPSMC_MSG_SetSoftMaxFclkByFreq
),
96 MSG_MAP(SetSoftMaxVcn
, PPSMC_MSG_SetSoftMaxVcn
),
97 MSG_MAP(PowerGateMmHub
, PPSMC_MSG_PowerGateMmHub
),
98 MSG_MAP(UpdatePmeRestore
, PPSMC_MSG_UpdatePmeRestore
),
99 MSG_MAP(GpuChangeState
, PPSMC_MSG_GpuChangeState
),
100 MSG_MAP(SetPowerLimitPercentage
, PPSMC_MSG_SetPowerLimitPercentage
),
101 MSG_MAP(ForceGfxContentSave
, PPSMC_MSG_ForceGfxContentSave
),
102 MSG_MAP(EnableTmdp48MHzRefclkPwrDown
, PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown
),
103 MSG_MAP(PowerDownJpeg
, PPSMC_MSG_PowerDownJpeg
),
104 MSG_MAP(PowerUpJpeg
, PPSMC_MSG_PowerUpJpeg
),
105 MSG_MAP(PowerGateAtHub
, PPSMC_MSG_PowerGateAtHub
),
106 MSG_MAP(SetSoftMinJpeg
, PPSMC_MSG_SetSoftMinJpeg
),
107 MSG_MAP(SetHardMinFclkByFreq
, PPSMC_MSG_SetHardMinFclkByFreq
),
110 static struct smu_12_0_cmn2aisc_mapping renoir_clk_map
[SMU_CLK_COUNT
] = {
111 CLK_MAP(GFXCLK
, CLOCK_GFXCLK
),
112 CLK_MAP(SCLK
, CLOCK_GFXCLK
),
113 CLK_MAP(SOCCLK
, CLOCK_SOCCLK
),
114 CLK_MAP(UCLK
, CLOCK_UMCCLK
),
115 CLK_MAP(MCLK
, CLOCK_UMCCLK
),
118 static struct smu_12_0_cmn2aisc_mapping renoir_table_map
[SMU_TABLE_COUNT
] = {
119 TAB_MAP_VALID(WATERMARKS
),
120 TAB_MAP_INVALID(CUSTOM_DPM
),
121 TAB_MAP_VALID(DPMCLOCKS
),
122 TAB_MAP_VALID(SMU_METRICS
),
125 static int renoir_get_smu_msg_index(struct smu_context
*smc
, uint32_t index
)
127 struct smu_12_0_cmn2aisc_mapping mapping
;
129 if (index
>= SMU_MSG_MAX_COUNT
)
132 mapping
= renoir_message_map
[index
];
133 if (!(mapping
.valid_mapping
))
136 return mapping
.map_to
;
139 static int renoir_get_smu_clk_index(struct smu_context
*smc
, uint32_t index
)
141 struct smu_12_0_cmn2aisc_mapping mapping
;
143 if (index
>= SMU_CLK_COUNT
)
146 mapping
= renoir_clk_map
[index
];
147 if (!(mapping
.valid_mapping
)) {
151 return mapping
.map_to
;
154 static int renoir_get_smu_table_index(struct smu_context
*smc
, uint32_t index
)
156 struct smu_12_0_cmn2aisc_mapping mapping
;
158 if (index
>= SMU_TABLE_COUNT
)
161 mapping
= renoir_table_map
[index
];
162 if (!(mapping
.valid_mapping
))
165 return mapping
.map_to
;
168 static int renoir_get_metrics_table(struct smu_context
*smu
,
169 SmuMetrics_t
*metrics_table
)
171 struct smu_table_context
*smu_table
= &smu
->smu_table
;
174 mutex_lock(&smu
->metrics_lock
);
175 if (!smu_table
->metrics_time
|| time_after(jiffies
, smu_table
->metrics_time
+ msecs_to_jiffies(100))) {
176 ret
= smu_update_table(smu
, SMU_TABLE_SMU_METRICS
, 0,
177 (void *)smu_table
->metrics_table
, false);
179 pr_info("Failed to export SMU metrics table!\n");
180 mutex_unlock(&smu
->metrics_lock
);
183 smu_table
->metrics_time
= jiffies
;
186 memcpy(metrics_table
, smu_table
->metrics_table
, sizeof(SmuMetrics_t
));
187 mutex_unlock(&smu
->metrics_lock
);
192 static int renoir_tables_init(struct smu_context
*smu
, struct smu_table
*tables
)
194 struct smu_table_context
*smu_table
= &smu
->smu_table
;
196 SMU_TABLE_INIT(tables
, SMU_TABLE_WATERMARKS
, sizeof(Watermarks_t
),
197 PAGE_SIZE
, AMDGPU_GEM_DOMAIN_VRAM
);
198 SMU_TABLE_INIT(tables
, SMU_TABLE_DPMCLOCKS
, sizeof(DpmClocks_t
),
199 PAGE_SIZE
, AMDGPU_GEM_DOMAIN_VRAM
);
200 SMU_TABLE_INIT(tables
, SMU_TABLE_SMU_METRICS
, sizeof(SmuMetrics_t
),
201 PAGE_SIZE
, AMDGPU_GEM_DOMAIN_VRAM
);
203 smu_table
->clocks_table
= kzalloc(sizeof(DpmClocks_t
), GFP_KERNEL
);
204 if (!smu_table
->clocks_table
)
207 smu_table
->metrics_table
= kzalloc(sizeof(SmuMetrics_t
), GFP_KERNEL
);
208 if (!smu_table
->metrics_table
)
210 smu_table
->metrics_time
= 0;
212 smu_table
->watermarks_table
= kzalloc(sizeof(Watermarks_t
), GFP_KERNEL
);
213 if (!smu_table
->watermarks_table
)
220 * This interface just for getting uclk ultimate freq and should't introduce
221 * other likewise function result in overmuch callback.
223 static int renoir_get_dpm_clk_limited(struct smu_context
*smu
, enum smu_clk_type clk_type
,
224 uint32_t dpm_level
, uint32_t *freq
)
226 DpmClocks_t
*clk_table
= smu
->smu_table
.clocks_table
;
228 if (!clk_table
|| clk_type
>= SMU_CLK_COUNT
)
231 GET_DPM_CUR_FREQ(clk_table
, clk_type
, dpm_level
, *freq
);
236 static int renoir_print_clk_levels(struct smu_context
*smu
,
237 enum smu_clk_type clk_type
, char *buf
)
239 int i
, size
= 0, ret
= 0;
240 uint32_t cur_value
= 0, value
= 0, count
= 0, min
= 0, max
= 0;
241 DpmClocks_t
*clk_table
= smu
->smu_table
.clocks_table
;
242 SmuMetrics_t metrics
;
244 if (!clk_table
|| clk_type
>= SMU_CLK_COUNT
)
247 memset(&metrics
, 0, sizeof(metrics
));
249 ret
= renoir_get_metrics_table(smu
, &metrics
);
256 /* retirve table returned paramters unit is MHz */
257 cur_value
= metrics
.ClockFrequency
[CLOCK_GFXCLK
];
258 ret
= smu_get_dpm_freq_range(smu
, SMU_GFXCLK
, &min
, &max
, false);
260 /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
261 if (cur_value
== max
)
263 else if (cur_value
== min
)
268 size
+= sprintf(buf
+ size
, "0: %uMhz %s\n", min
,
270 size
+= sprintf(buf
+ size
, "1: %uMhz %s\n",
271 i
== 1 ? cur_value
: RENOIR_UMD_PSTATE_GFXCLK
,
273 size
+= sprintf(buf
+ size
, "2: %uMhz %s\n", max
,
278 count
= NUM_SOCCLK_DPM_LEVELS
;
279 cur_value
= metrics
.ClockFrequency
[CLOCK_SOCCLK
];
282 count
= NUM_MEMCLK_DPM_LEVELS
;
283 cur_value
= metrics
.ClockFrequency
[CLOCK_UMCCLK
];
286 count
= NUM_DCFCLK_DPM_LEVELS
;
287 cur_value
= metrics
.ClockFrequency
[CLOCK_DCFCLK
];
290 count
= NUM_FCLK_DPM_LEVELS
;
291 cur_value
= metrics
.ClockFrequency
[CLOCK_FCLK
];
297 for (i
= 0; i
< count
; i
++) {
298 GET_DPM_CUR_FREQ(clk_table
, clk_type
, i
, value
);
299 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n", i
, value
,
300 cur_value
== value
? "*" : "");
306 static enum amd_pm_state_type
renoir_get_current_power_state(struct smu_context
*smu
)
308 enum amd_pm_state_type pm_type
;
309 struct smu_dpm_context
*smu_dpm_ctx
= &(smu
->smu_dpm
);
311 if (!smu_dpm_ctx
->dpm_context
||
312 !smu_dpm_ctx
->dpm_current_power_state
)
315 switch (smu_dpm_ctx
->dpm_current_power_state
->classification
.ui_label
) {
316 case SMU_STATE_UI_LABEL_BATTERY
:
317 pm_type
= POWER_STATE_TYPE_BATTERY
;
319 case SMU_STATE_UI_LABEL_BALLANCED
:
320 pm_type
= POWER_STATE_TYPE_BALANCED
;
322 case SMU_STATE_UI_LABEL_PERFORMANCE
:
323 pm_type
= POWER_STATE_TYPE_PERFORMANCE
;
326 if (smu_dpm_ctx
->dpm_current_power_state
->classification
.flags
& SMU_STATE_CLASSIFICATION_FLAG_BOOT
)
327 pm_type
= POWER_STATE_TYPE_INTERNAL_BOOT
;
329 pm_type
= POWER_STATE_TYPE_DEFAULT
;
336 static int renoir_dpm_set_uvd_enable(struct smu_context
*smu
, bool enable
)
338 struct smu_power_context
*smu_power
= &smu
->smu_power
;
339 struct smu_power_gate
*power_gate
= &smu_power
->power_gate
;
343 /* vcn dpm on is a prerequisite for vcn power gate messages */
344 if (smu_feature_is_enabled(smu
, SMU_FEATURE_VCN_PG_BIT
)) {
345 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_PowerUpVcn
, 0);
349 power_gate
->vcn_gated
= false;
351 if (smu_feature_is_enabled(smu
, SMU_FEATURE_VCN_PG_BIT
)) {
352 ret
= smu_send_smc_msg(smu
, SMU_MSG_PowerDownVcn
);
356 power_gate
->vcn_gated
= true;
362 static int renoir_dpm_set_jpeg_enable(struct smu_context
*smu
, bool enable
)
364 struct smu_power_context
*smu_power
= &smu
->smu_power
;
365 struct smu_power_gate
*power_gate
= &smu_power
->power_gate
;
369 if (smu_feature_is_enabled(smu
, SMU_FEATURE_JPEG_PG_BIT
)) {
370 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_PowerUpJpeg
, 0);
374 power_gate
->jpeg_gated
= false;
376 if (smu_feature_is_enabled(smu
, SMU_FEATURE_JPEG_PG_BIT
)) {
377 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_PowerDownJpeg
, 0);
381 power_gate
->jpeg_gated
= true;
387 static int renoir_get_current_clk_freq_by_table(struct smu_context
*smu
,
388 enum smu_clk_type clk_type
,
391 int ret
= 0, clk_id
= 0;
392 SmuMetrics_t metrics
;
394 ret
= renoir_get_metrics_table(smu
, &metrics
);
398 clk_id
= smu_clk_get_index(smu
, clk_type
);
402 *value
= metrics
.ClockFrequency
[clk_id
];
407 static int renoir_force_dpm_limit_value(struct smu_context
*smu
, bool highest
)
410 uint32_t min_freq
, max_freq
, force_freq
;
411 enum smu_clk_type clk_type
;
413 enum smu_clk_type clks
[] = {
419 for (i
= 0; i
< ARRAY_SIZE(clks
); i
++) {
421 ret
= smu_get_dpm_freq_range(smu
, clk_type
, &min_freq
, &max_freq
, false);
425 force_freq
= highest
? max_freq
: min_freq
;
426 ret
= smu_set_soft_freq_range(smu
, clk_type
, force_freq
, force_freq
);
434 static int renoir_unforce_dpm_levels(struct smu_context
*smu
) {
437 uint32_t min_freq
, max_freq
;
438 enum smu_clk_type clk_type
;
440 struct clk_feature_map
{
441 enum smu_clk_type clk_type
;
443 } clk_feature_map
[] = {
444 {SMU_GFXCLK
, SMU_FEATURE_DPM_GFXCLK_BIT
},
445 {SMU_MCLK
, SMU_FEATURE_DPM_UCLK_BIT
},
446 {SMU_SOCCLK
, SMU_FEATURE_DPM_SOCCLK_BIT
},
449 for (i
= 0; i
< ARRAY_SIZE(clk_feature_map
); i
++) {
450 if (!smu_feature_is_enabled(smu
, clk_feature_map
[i
].feature
))
453 clk_type
= clk_feature_map
[i
].clk_type
;
455 ret
= smu_get_dpm_freq_range(smu
, clk_type
, &min_freq
, &max_freq
, false);
459 ret
= smu_set_soft_freq_range(smu
, clk_type
, min_freq
, max_freq
);
467 static int renoir_get_gpu_temperature(struct smu_context
*smu
, uint32_t *value
)
470 SmuMetrics_t metrics
;
475 ret
= renoir_get_metrics_table(smu
, &metrics
);
479 *value
= (metrics
.GfxTemperature
/ 100) *
480 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES
;
485 static int renoir_get_current_activity_percent(struct smu_context
*smu
,
486 enum amd_pp_sensors sensor
,
490 SmuMetrics_t metrics
;
495 ret
= renoir_get_metrics_table(smu
, &metrics
);
500 case AMDGPU_PP_SENSOR_GPU_LOAD
:
501 *value
= metrics
.AverageGfxActivity
/ 100;
504 pr_err("Invalid sensor for retrieving clock activity\n");
511 static int renoir_get_workload_type(struct smu_context
*smu
, uint32_t profile
)
514 uint32_t pplib_workload
= 0;
517 case PP_SMC_POWER_PROFILE_FULLSCREEN3D
:
518 pplib_workload
= WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT
;
520 case PP_SMC_POWER_PROFILE_CUSTOM
:
521 pplib_workload
= WORKLOAD_PPLIB_COUNT
;
523 case PP_SMC_POWER_PROFILE_VIDEO
:
524 pplib_workload
= WORKLOAD_PPLIB_VIDEO_BIT
;
526 case PP_SMC_POWER_PROFILE_VR
:
527 pplib_workload
= WORKLOAD_PPLIB_VR_BIT
;
529 case PP_SMC_POWER_PROFILE_COMPUTE
:
530 pplib_workload
= WORKLOAD_PPLIB_COMPUTE_BIT
;
536 return pplib_workload
;
539 static int renoir_get_profiling_clk_mask(struct smu_context
*smu
,
540 enum amd_dpm_forced_level level
,
546 if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
) {
549 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
) {
552 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
554 /* The sclk as gfxclk and has three level about max/min/current */
558 *mclk_mask
= NUM_MEMCLK_DPM_LEVELS
- 1;
561 *soc_mask
= NUM_SOCCLK_DPM_LEVELS
- 1;
568 * This interface get dpm clock table for dc
570 static int renoir_get_dpm_clock_table(struct smu_context
*smu
, struct dpm_clocks
*clock_table
)
572 DpmClocks_t
*table
= smu
->smu_table
.clocks_table
;
575 if (!clock_table
|| !table
)
578 for (i
= 0; i
< NUM_DCFCLK_DPM_LEVELS
; i
++) {
579 clock_table
->DcfClocks
[i
].Freq
= table
->DcfClocks
[i
].Freq
;
580 clock_table
->DcfClocks
[i
].Vol
= table
->DcfClocks
[i
].Vol
;
583 for (i
= 0; i
< NUM_SOCCLK_DPM_LEVELS
; i
++) {
584 clock_table
->SocClocks
[i
].Freq
= table
->SocClocks
[i
].Freq
;
585 clock_table
->SocClocks
[i
].Vol
= table
->SocClocks
[i
].Vol
;
588 for (i
= 0; i
< NUM_FCLK_DPM_LEVELS
; i
++) {
589 clock_table
->FClocks
[i
].Freq
= table
->FClocks
[i
].Freq
;
590 clock_table
->FClocks
[i
].Vol
= table
->FClocks
[i
].Vol
;
593 for (i
= 0; i
< NUM_MEMCLK_DPM_LEVELS
; i
++) {
594 clock_table
->MemClocks
[i
].Freq
= table
->MemClocks
[i
].Freq
;
595 clock_table
->MemClocks
[i
].Vol
= table
->MemClocks
[i
].Vol
;
601 static int renoir_force_clk_levels(struct smu_context
*smu
,
602 enum smu_clk_type clk_type
, uint32_t mask
)
606 uint32_t soft_min_level
= 0, soft_max_level
= 0, min_freq
= 0, max_freq
= 0;
607 DpmClocks_t
*clk_table
= smu
->smu_table
.clocks_table
;
609 soft_min_level
= mask
? (ffs(mask
) - 1) : 0;
610 soft_max_level
= mask
? (fls(mask
) - 1) : 0;
615 if (soft_min_level
> 2 || soft_max_level
> 2) {
616 pr_info("Currently sclk only support 3 levels on APU\n");
620 ret
= smu_get_dpm_freq_range(smu
, SMU_GFXCLK
, &min_freq
, &max_freq
, false);
623 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetSoftMaxGfxClk
,
624 soft_max_level
== 0 ? min_freq
:
625 soft_max_level
== 1 ? RENOIR_UMD_PSTATE_GFXCLK
: max_freq
);
628 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetHardMinGfxClk
,
629 soft_min_level
== 2 ? max_freq
:
630 soft_min_level
== 1 ? RENOIR_UMD_PSTATE_GFXCLK
: min_freq
);
635 GET_DPM_CUR_FREQ(clk_table
, clk_type
, soft_min_level
, min_freq
);
636 GET_DPM_CUR_FREQ(clk_table
, clk_type
, soft_max_level
, max_freq
);
637 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetSoftMaxSocclkByFreq
, max_freq
);
640 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetHardMinSocclkByFreq
, min_freq
);
646 GET_DPM_CUR_FREQ(clk_table
, clk_type
, soft_min_level
, min_freq
);
647 GET_DPM_CUR_FREQ(clk_table
, clk_type
, soft_max_level
, max_freq
);
648 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetSoftMaxFclkByFreq
, max_freq
);
651 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetHardMinFclkByFreq
, min_freq
);
662 static int renoir_set_power_profile_mode(struct smu_context
*smu
, long *input
, uint32_t size
)
664 int workload_type
, ret
;
665 uint32_t profile_mode
= input
[size
];
667 if (profile_mode
> PP_SMC_POWER_PROFILE_CUSTOM
) {
668 pr_err("Invalid power profile mode %d\n", smu
->power_profile_mode
);
672 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
673 workload_type
= smu_workload_get_type(smu
, smu
->power_profile_mode
);
674 if (workload_type
< 0) {
675 pr_err("Unsupported power profile mode %d on RENOIR\n",smu
->power_profile_mode
);
679 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetWorkloadMask
,
682 pr_err("Fail to set workload type %d\n", workload_type
);
686 smu
->power_profile_mode
= profile_mode
;
691 static int renoir_set_peak_clock_by_device(struct smu_context
*smu
)
694 uint32_t sclk_freq
= 0, uclk_freq
= 0;
696 ret
= smu_get_dpm_freq_range(smu
, SMU_SCLK
, NULL
, &sclk_freq
, false);
700 ret
= smu_set_soft_freq_range(smu
, SMU_SCLK
, sclk_freq
, sclk_freq
);
704 ret
= smu_get_dpm_freq_range(smu
, SMU_UCLK
, NULL
, &uclk_freq
, false);
708 ret
= smu_set_soft_freq_range(smu
, SMU_UCLK
, uclk_freq
, uclk_freq
);
715 static int renoir_set_performance_level(struct smu_context
*smu
,
716 enum amd_dpm_forced_level level
)
719 uint32_t sclk_mask
, mclk_mask
, soc_mask
;
722 case AMD_DPM_FORCED_LEVEL_HIGH
:
723 ret
= smu_force_dpm_limit_value(smu
, true);
725 case AMD_DPM_FORCED_LEVEL_LOW
:
726 ret
= smu_force_dpm_limit_value(smu
, false);
728 case AMD_DPM_FORCED_LEVEL_AUTO
:
729 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
:
730 ret
= smu_unforce_dpm_levels(smu
);
732 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
:
733 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
:
734 ret
= smu_get_profiling_clk_mask(smu
, level
,
740 smu_force_clk_levels(smu
, SMU_SCLK
, 1 << sclk_mask
, false);
741 smu_force_clk_levels(smu
, SMU_MCLK
, 1 << mclk_mask
, false);
742 smu_force_clk_levels(smu
, SMU_SOCCLK
, 1 << soc_mask
, false);
744 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
:
745 ret
= renoir_set_peak_clock_by_device(smu
);
747 case AMD_DPM_FORCED_LEVEL_MANUAL
:
748 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
:
755 /* save watermark settings into pplib smu structure,
756 * also pass data to smu controller
758 static int renoir_set_watermarks_table(
759 struct smu_context
*smu
,
761 struct dm_pp_wm_sets_with_clock_ranges_soc15
*clock_ranges
)
765 Watermarks_t
*table
= watermarks
;
767 if (!table
|| !clock_ranges
)
770 if (clock_ranges
->num_wm_dmif_sets
> 4 ||
771 clock_ranges
->num_wm_mcif_sets
> 4)
774 /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
775 for (i
= 0; i
< clock_ranges
->num_wm_dmif_sets
; i
++) {
776 table
->WatermarkRow
[WM_DCFCLK
][i
].MinClock
=
777 cpu_to_le16((uint16_t)
778 (clock_ranges
->wm_dmif_clocks_ranges
[i
].wm_min_dcfclk_clk_in_khz
));
779 table
->WatermarkRow
[WM_DCFCLK
][i
].MaxClock
=
780 cpu_to_le16((uint16_t)
781 (clock_ranges
->wm_dmif_clocks_ranges
[i
].wm_max_dcfclk_clk_in_khz
));
782 table
->WatermarkRow
[WM_DCFCLK
][i
].MinMclk
=
783 cpu_to_le16((uint16_t)
784 (clock_ranges
->wm_dmif_clocks_ranges
[i
].wm_min_mem_clk_in_khz
));
785 table
->WatermarkRow
[WM_DCFCLK
][i
].MaxMclk
=
786 cpu_to_le16((uint16_t)
787 (clock_ranges
->wm_dmif_clocks_ranges
[i
].wm_max_mem_clk_in_khz
));
788 table
->WatermarkRow
[WM_DCFCLK
][i
].WmSetting
= (uint8_t)
789 clock_ranges
->wm_dmif_clocks_ranges
[i
].wm_set_id
;
792 for (i
= 0; i
< clock_ranges
->num_wm_mcif_sets
; i
++) {
793 table
->WatermarkRow
[WM_SOCCLK
][i
].MinClock
=
794 cpu_to_le16((uint16_t)
795 (clock_ranges
->wm_mcif_clocks_ranges
[i
].wm_min_socclk_clk_in_khz
));
796 table
->WatermarkRow
[WM_SOCCLK
][i
].MaxClock
=
797 cpu_to_le16((uint16_t)
798 (clock_ranges
->wm_mcif_clocks_ranges
[i
].wm_max_socclk_clk_in_khz
));
799 table
->WatermarkRow
[WM_SOCCLK
][i
].MinMclk
=
800 cpu_to_le16((uint16_t)
801 (clock_ranges
->wm_mcif_clocks_ranges
[i
].wm_min_mem_clk_in_khz
));
802 table
->WatermarkRow
[WM_SOCCLK
][i
].MaxMclk
=
803 cpu_to_le16((uint16_t)
804 (clock_ranges
->wm_mcif_clocks_ranges
[i
].wm_max_mem_clk_in_khz
));
805 table
->WatermarkRow
[WM_SOCCLK
][i
].WmSetting
= (uint8_t)
806 clock_ranges
->wm_mcif_clocks_ranges
[i
].wm_set_id
;
809 /* pass data to smu controller */
810 if ((smu
->watermarks_bitmap
& WATERMARKS_EXIST
) &&
811 !(smu
->watermarks_bitmap
& WATERMARKS_LOADED
)) {
812 ret
= smu_write_watermarks_table(smu
);
814 pr_err("Failed to update WMTABLE!");
817 smu
->watermarks_bitmap
|= WATERMARKS_LOADED
;
823 static int renoir_get_power_profile_mode(struct smu_context
*smu
,
826 static const char *profile_name
[] = {
834 uint32_t i
, size
= 0;
835 int16_t workload_type
= 0;
837 if (!smu
->pm_enabled
|| !buf
)
840 for (i
= 0; i
<= PP_SMC_POWER_PROFILE_CUSTOM
; i
++) {
842 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
843 * Not all profile modes are supported on arcturus.
845 workload_type
= smu_workload_get_type(smu
, i
);
846 if (workload_type
< 0)
849 size
+= sprintf(buf
+ size
, "%2d %14s%s\n",
850 i
, profile_name
[i
], (i
== smu
->power_profile_mode
) ? "*" : " ");
856 static int renoir_read_sensor(struct smu_context
*smu
,
857 enum amd_pp_sensors sensor
,
858 void *data
, uint32_t *size
)
865 mutex_lock(&smu
->sensor_lock
);
867 case AMDGPU_PP_SENSOR_GPU_LOAD
:
868 ret
= renoir_get_current_activity_percent(smu
, sensor
, (uint32_t *)data
);
871 case AMDGPU_PP_SENSOR_GPU_TEMP
:
872 ret
= renoir_get_gpu_temperature(smu
, (uint32_t *)data
);
876 ret
= smu_v12_0_read_sensor(smu
, sensor
, data
, size
);
878 mutex_unlock(&smu
->sensor_lock
);
883 static const struct pptable_funcs renoir_ppt_funcs
= {
884 .get_smu_msg_index
= renoir_get_smu_msg_index
,
885 .get_smu_clk_index
= renoir_get_smu_clk_index
,
886 .get_smu_table_index
= renoir_get_smu_table_index
,
887 .tables_init
= renoir_tables_init
,
888 .set_power_state
= NULL
,
889 .get_dpm_clk_limited
= renoir_get_dpm_clk_limited
,
890 .print_clk_levels
= renoir_print_clk_levels
,
891 .get_current_power_state
= renoir_get_current_power_state
,
892 .dpm_set_uvd_enable
= renoir_dpm_set_uvd_enable
,
893 .dpm_set_jpeg_enable
= renoir_dpm_set_jpeg_enable
,
894 .get_current_clk_freq_by_table
= renoir_get_current_clk_freq_by_table
,
895 .force_dpm_limit_value
= renoir_force_dpm_limit_value
,
896 .unforce_dpm_levels
= renoir_unforce_dpm_levels
,
897 .get_workload_type
= renoir_get_workload_type
,
898 .get_profiling_clk_mask
= renoir_get_profiling_clk_mask
,
899 .force_clk_levels
= renoir_force_clk_levels
,
900 .set_power_profile_mode
= renoir_set_power_profile_mode
,
901 .set_performance_level
= renoir_set_performance_level
,
902 .get_dpm_clock_table
= renoir_get_dpm_clock_table
,
903 .set_watermarks_table
= renoir_set_watermarks_table
,
904 .get_power_profile_mode
= renoir_get_power_profile_mode
,
905 .read_sensor
= renoir_read_sensor
,
906 .check_fw_status
= smu_v12_0_check_fw_status
,
907 .check_fw_version
= smu_v12_0_check_fw_version
,
908 .powergate_sdma
= smu_v12_0_powergate_sdma
,
909 .powergate_vcn
= smu_v12_0_powergate_vcn
,
910 .powergate_jpeg
= smu_v12_0_powergate_jpeg
,
911 .send_smc_msg_with_param
= smu_v12_0_send_msg_with_param
,
912 .read_smc_arg
= smu_v12_0_read_arg
,
913 .set_gfx_cgpg
= smu_v12_0_set_gfx_cgpg
,
914 .gfx_off_control
= smu_v12_0_gfx_off_control
,
915 .init_smc_tables
= smu_v12_0_init_smc_tables
,
916 .fini_smc_tables
= smu_v12_0_fini_smc_tables
,
917 .populate_smc_tables
= smu_v12_0_populate_smc_tables
,
918 .get_enabled_mask
= smu_v12_0_get_enabled_mask
,
919 .get_current_clk_freq
= smu_v12_0_get_current_clk_freq
,
920 .get_dpm_ultimate_freq
= smu_v12_0_get_dpm_ultimate_freq
,
921 .mode2_reset
= smu_v12_0_mode2_reset
,
922 .set_soft_freq_limited_range
= smu_v12_0_set_soft_freq_limited_range
,
923 .set_driver_table_location
= smu_v12_0_set_driver_table_location
,
926 void renoir_set_ppt_funcs(struct smu_context
*smu
)
928 smu
->ppt_funcs
= &renoir_ppt_funcs
;
929 smu
->smc_if_version
= SMU12_DRIVER_IF_VERSION
;