/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <linux/firmware.h>
26 #include "amdgpu_smu.h"
27 #include "smu_internal.h"
28 #include "atomfirmware.h"
29 #include "amdgpu_atomfirmware.h"
30 #include "smu_v12_0.h"
31 #include "soc15_common.h"
34 #include "asic_reg/mp/mp_12_0_0_offset.h"
35 #include "asic_reg/mp/mp_12_0_0_sh_mask.h"
37 #define smnMP1_FIRMWARE_FLAGS 0x3010024
39 #define mmSMUIO_GFX_MISC_CNTL 0x00c8
40 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
41 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
42 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
44 int smu_v12_0_send_msg_without_waiting(struct smu_context
*smu
,
47 struct amdgpu_device
*adev
= smu
->adev
;
49 WREG32_SOC15(MP1
, 0, mmMP1_SMN_C2PMSG_66
, msg
);
53 int smu_v12_0_read_arg(struct smu_context
*smu
, uint32_t *arg
)
55 struct amdgpu_device
*adev
= smu
->adev
;
57 *arg
= RREG32_SOC15(MP1
, 0, mmMP1_SMN_C2PMSG_82
);
61 int smu_v12_0_wait_for_response(struct smu_context
*smu
)
63 struct amdgpu_device
*adev
= smu
->adev
;
64 uint32_t cur_value
, i
;
66 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
67 cur_value
= RREG32_SOC15(MP1
, 0, mmMP1_SMN_C2PMSG_90
);
68 if ((cur_value
& MP1_C2PMSG_90__CONTENT_MASK
) != 0)
69 return cur_value
== 0x1 ? 0 : -EIO
;
74 /* timeout means wrong logic */
79 smu_v12_0_send_msg_with_param(struct smu_context
*smu
,
80 enum smu_message_type msg
,
83 struct amdgpu_device
*adev
= smu
->adev
;
84 int ret
= 0, index
= 0;
86 index
= smu_msg_get_index(smu
, msg
);
90 ret
= smu_v12_0_wait_for_response(smu
);
92 pr_err("Msg issuing pre-check failed and "
93 "SMU may be not in the right state!\n");
97 WREG32_SOC15(MP1
, 0, mmMP1_SMN_C2PMSG_90
, 0);
99 WREG32_SOC15(MP1
, 0, mmMP1_SMN_C2PMSG_82
, param
);
101 smu_v12_0_send_msg_without_waiting(smu
, (uint16_t)index
);
103 ret
= smu_v12_0_wait_for_response(smu
);
105 pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
111 int smu_v12_0_check_fw_status(struct smu_context
*smu
)
113 struct amdgpu_device
*adev
= smu
->adev
;
114 uint32_t mp1_fw_flags
;
116 mp1_fw_flags
= RREG32_PCIE(MP1_Public
|
117 (smnMP1_FIRMWARE_FLAGS
& 0xffffffff));
119 if ((mp1_fw_flags
& MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK
) >>
120 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT
)
126 int smu_v12_0_check_fw_version(struct smu_context
*smu
)
128 uint32_t if_version
= 0xff, smu_version
= 0xff;
130 uint8_t smu_minor
, smu_debug
;
133 ret
= smu_get_smc_version(smu
, &if_version
, &smu_version
);
137 smu_major
= (smu_version
>> 16) & 0xffff;
138 smu_minor
= (smu_version
>> 8) & 0xff;
139 smu_debug
= (smu_version
>> 0) & 0xff;
142 * 1. if_version mismatch is not critical as our fw is designed
143 * to be backward compatible.
144 * 2. New fw usually brings some optimizations. But that's visible
145 * only on the paired driver.
146 * Considering above, we just leave user a warning message instead
147 * of halt driver loading.
149 if (if_version
!= smu
->smc_if_version
) {
150 pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
151 "smu fw version = 0x%08x (%d.%d.%d)\n",
152 smu
->smc_if_version
, if_version
,
153 smu_version
, smu_major
, smu_minor
, smu_debug
);
154 pr_warn("SMU driver if version not matched\n");
160 int smu_v12_0_powergate_sdma(struct smu_context
*smu
, bool gate
)
166 return smu_send_smc_msg(smu
, SMU_MSG_PowerDownSdma
);
168 return smu_send_smc_msg(smu
, SMU_MSG_PowerUpSdma
);
171 int smu_v12_0_powergate_vcn(struct smu_context
*smu
, bool gate
)
177 return smu_send_smc_msg(smu
, SMU_MSG_PowerDownVcn
);
179 return smu_send_smc_msg(smu
, SMU_MSG_PowerUpVcn
);
182 int smu_v12_0_powergate_jpeg(struct smu_context
*smu
, bool gate
)
188 return smu_send_smc_msg_with_param(smu
, SMU_MSG_PowerDownJpeg
, 0);
190 return smu_send_smc_msg_with_param(smu
, SMU_MSG_PowerUpJpeg
, 0);
193 int smu_v12_0_set_gfx_cgpg(struct smu_context
*smu
, bool enable
)
195 if (!(smu
->adev
->pg_flags
& AMD_PG_SUPPORT_GFX_PG
))
198 return smu_v12_0_send_msg_with_param(smu
,
199 SMU_MSG_SetGfxCGPG
, enable
? 1 : 0);
202 int smu_v12_0_read_sensor(struct smu_context
*smu
,
203 enum amd_pp_sensors sensor
,
204 void *data
, uint32_t *size
)
212 case AMDGPU_PP_SENSOR_GFX_MCLK
:
213 ret
= smu_get_current_clk_freq(smu
, SMU_UCLK
, (uint32_t *)data
);
216 case AMDGPU_PP_SENSOR_GFX_SCLK
:
217 ret
= smu_get_current_clk_freq(smu
, SMU_GFXCLK
, (uint32_t *)data
);
220 case AMDGPU_PP_SENSOR_MIN_FAN_RPM
:
221 *(uint32_t *)data
= 0;
225 ret
= smu_common_read_sensor(smu
, sensor
, data
, size
);
236 * smu_v12_0_get_gfxoff_status - get gfxoff status
238 * @smu: amdgpu_device pointer
240 * This function will be used to get gfxoff status
242 * Returns 0=GFXOFF(default).
243 * Returns 1=Transition out of GFX State.
244 * Returns 2=Not in GFXOFF.
245 * Returns 3=Transition into GFXOFF.
247 uint32_t smu_v12_0_get_gfxoff_status(struct smu_context
*smu
)
250 uint32_t gfxOff_Status
= 0;
251 struct amdgpu_device
*adev
= smu
->adev
;
253 reg
= RREG32_SOC15(SMUIO
, 0, mmSMUIO_GFX_MISC_CNTL
);
254 gfxOff_Status
= (reg
& SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK
)
255 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT
;
257 return gfxOff_Status
;
260 int smu_v12_0_gfx_off_control(struct smu_context
*smu
, bool enable
)
262 int ret
= 0, timeout
= 500;
265 ret
= smu_send_smc_msg(smu
, SMU_MSG_AllowGfxOff
);
268 ret
= smu_send_smc_msg(smu
, SMU_MSG_DisallowGfxOff
);
270 /* confirm gfx is back to "on" state, timeout is 0.5 second */
271 while (!(smu_v12_0_get_gfxoff_status(smu
) == 2)) {
275 DRM_ERROR("disable gfxoff timeout and failed!\n");
284 int smu_v12_0_init_smc_tables(struct smu_context
*smu
)
286 struct smu_table_context
*smu_table
= &smu
->smu_table
;
287 struct smu_table
*tables
= NULL
;
289 if (smu_table
->tables
)
292 tables
= kcalloc(SMU_TABLE_COUNT
, sizeof(struct smu_table
),
297 smu_table
->tables
= tables
;
299 return smu_tables_init(smu
, tables
);
302 int smu_v12_0_fini_smc_tables(struct smu_context
*smu
)
304 struct smu_table_context
*smu_table
= &smu
->smu_table
;
306 if (!smu_table
->tables
)
309 kfree(smu_table
->clocks_table
);
310 kfree(smu_table
->tables
);
312 smu_table
->clocks_table
= NULL
;
313 smu_table
->tables
= NULL
;
318 int smu_v12_0_populate_smc_tables(struct smu_context
*smu
)
320 struct smu_table_context
*smu_table
= &smu
->smu_table
;
322 return smu_update_table(smu
, SMU_TABLE_DPMCLOCKS
, 0, smu_table
->clocks_table
, false);
325 int smu_v12_0_get_enabled_mask(struct smu_context
*smu
,
326 uint32_t *feature_mask
, uint32_t num
)
328 uint32_t feature_mask_high
= 0, feature_mask_low
= 0;
331 if (!feature_mask
|| num
< 2)
334 ret
= smu_send_smc_msg(smu
, SMU_MSG_GetEnabledSmuFeaturesHigh
);
337 ret
= smu_read_smc_arg(smu
, &feature_mask_high
);
341 ret
= smu_send_smc_msg(smu
, SMU_MSG_GetEnabledSmuFeaturesLow
);
344 ret
= smu_read_smc_arg(smu
, &feature_mask_low
);
348 feature_mask
[0] = feature_mask_low
;
349 feature_mask
[1] = feature_mask_high
;
354 int smu_v12_0_get_current_clk_freq(struct smu_context
*smu
,
355 enum smu_clk_type clk_id
,
361 if (clk_id
>= SMU_CLK_COUNT
|| !value
)
364 ret
= smu_get_current_clk_freq_by_table(smu
, clk_id
, &freq
);
374 int smu_v12_0_get_dpm_ultimate_freq(struct smu_context
*smu
, enum smu_clk_type clk_type
,
375 uint32_t *min
, uint32_t *max
)
378 uint32_t mclk_mask
, soc_mask
;
381 ret
= smu_get_profiling_clk_mask(smu
, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
,
391 ret
= smu_send_smc_msg(smu
, SMU_MSG_GetMaxGfxclkFrequency
);
393 pr_err("Attempt to get max GX frequency from SMC Failed !\n");
396 ret
= smu_read_smc_arg(smu
, max
);
403 ret
= smu_get_dpm_clk_limited(smu
, clk_type
, mclk_mask
, max
);
408 ret
= smu_get_dpm_clk_limited(smu
, clk_type
, soc_mask
, max
);
422 ret
= smu_send_smc_msg(smu
, SMU_MSG_GetMinGfxclkFrequency
);
424 pr_err("Attempt to get min GX frequency from SMC Failed !\n");
427 ret
= smu_read_smc_arg(smu
, min
);
434 ret
= smu_get_dpm_clk_limited(smu
, clk_type
, 0, min
);
439 ret
= smu_get_dpm_clk_limited(smu
, clk_type
, 0, min
);
452 int smu_v12_0_mode2_reset(struct smu_context
*smu
){
453 return smu_v12_0_send_msg_with_param(smu
, SMU_MSG_GfxDeviceDriverReset
, SMU_RESET_MODE_2
);
456 int smu_v12_0_set_soft_freq_limited_range(struct smu_context
*smu
, enum smu_clk_type clk_type
,
457 uint32_t min
, uint32_t max
)
467 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetHardMinGfxClk
, min
);
471 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetSoftMaxGfxClk
, max
);
477 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetHardMinFclkByFreq
, min
);
481 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetSoftMaxFclkByFreq
, max
);
486 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetHardMinSocclkByFreq
, min
);
490 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetSoftMaxSocclkByFreq
, max
);
495 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetHardMinVcn
, min
);
499 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetSoftMaxVcn
, max
);
510 int smu_v12_0_set_driver_table_location(struct smu_context
*smu
)
512 struct smu_table
*driver_table
= &smu
->smu_table
.driver_table
;
515 if (driver_table
->mc_address
) {
516 ret
= smu_send_smc_msg_with_param(smu
,
517 SMU_MSG_SetDriverDramAddrHigh
,
518 upper_32_bits(driver_table
->mc_address
));
520 ret
= smu_send_smc_msg_with_param(smu
,
521 SMU_MSG_SetDriverDramAddrLow
,
522 lower_32_bits(driver_table
->mc_address
));