treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / gpu / drm / amd / powerplay / smu_v12_0.c
blob870e6db2907eb67e43d75ee26850c173d2985960
1 /*
2 * Copyright 2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include "pp_debug.h"
24 #include <linux/firmware.h>
25 #include "amdgpu.h"
26 #include "amdgpu_smu.h"
27 #include "smu_internal.h"
28 #include "atomfirmware.h"
29 #include "amdgpu_atomfirmware.h"
30 #include "smu_v12_0.h"
31 #include "soc15_common.h"
32 #include "atom.h"
34 #include "asic_reg/mp/mp_12_0_0_offset.h"
35 #include "asic_reg/mp/mp_12_0_0_sh_mask.h"
37 #define smnMP1_FIRMWARE_FLAGS 0x3010024
39 #define mmSMUIO_GFX_MISC_CNTL 0x00c8
40 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
41 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
42 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
44 int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
45 uint16_t msg)
47 struct amdgpu_device *adev = smu->adev;
49 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
50 return 0;
53 int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
55 struct amdgpu_device *adev = smu->adev;
57 *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
58 return 0;
61 int smu_v12_0_wait_for_response(struct smu_context *smu)
63 struct amdgpu_device *adev = smu->adev;
64 uint32_t cur_value, i;
66 for (i = 0; i < adev->usec_timeout; i++) {
67 cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
68 if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
69 return cur_value == 0x1 ? 0 : -EIO;
71 udelay(1);
74 /* timeout means wrong logic */
75 return -ETIME;
78 int
79 smu_v12_0_send_msg_with_param(struct smu_context *smu,
80 enum smu_message_type msg,
81 uint32_t param)
83 struct amdgpu_device *adev = smu->adev;
84 int ret = 0, index = 0;
86 index = smu_msg_get_index(smu, msg);
87 if (index < 0)
88 return index;
90 ret = smu_v12_0_wait_for_response(smu);
91 if (ret) {
92 pr_err("Msg issuing pre-check failed and "
93 "SMU may be not in the right state!\n");
94 return ret;
97 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
99 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
101 smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
103 ret = smu_v12_0_wait_for_response(smu);
104 if (ret)
105 pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
106 index, ret, param);
108 return ret;
111 int smu_v12_0_check_fw_status(struct smu_context *smu)
113 struct amdgpu_device *adev = smu->adev;
114 uint32_t mp1_fw_flags;
116 mp1_fw_flags = RREG32_PCIE(MP1_Public |
117 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
119 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
120 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
121 return 0;
123 return -EIO;
126 int smu_v12_0_check_fw_version(struct smu_context *smu)
128 uint32_t if_version = 0xff, smu_version = 0xff;
129 uint16_t smu_major;
130 uint8_t smu_minor, smu_debug;
131 int ret = 0;
133 ret = smu_get_smc_version(smu, &if_version, &smu_version);
134 if (ret)
135 return ret;
137 smu_major = (smu_version >> 16) & 0xffff;
138 smu_minor = (smu_version >> 8) & 0xff;
139 smu_debug = (smu_version >> 0) & 0xff;
142 * 1. if_version mismatch is not critical as our fw is designed
143 * to be backward compatible.
144 * 2. New fw usually brings some optimizations. But that's visible
145 * only on the paired driver.
146 * Considering above, we just leave user a warning message instead
147 * of halt driver loading.
149 if (if_version != smu->smc_if_version) {
150 pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
151 "smu fw version = 0x%08x (%d.%d.%d)\n",
152 smu->smc_if_version, if_version,
153 smu_version, smu_major, smu_minor, smu_debug);
154 pr_warn("SMU driver if version not matched\n");
157 return ret;
160 int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
162 if (!smu->is_apu)
163 return 0;
165 if (gate)
166 return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
167 else
168 return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
171 int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
173 if (!smu->is_apu)
174 return 0;
176 if (gate)
177 return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
178 else
179 return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
182 int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
184 if (!smu->is_apu)
185 return 0;
187 if (gate)
188 return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
189 else
190 return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
193 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
195 if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
196 return 0;
198 return smu_v12_0_send_msg_with_param(smu,
199 SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
202 int smu_v12_0_read_sensor(struct smu_context *smu,
203 enum amd_pp_sensors sensor,
204 void *data, uint32_t *size)
206 int ret = 0;
208 if(!data || !size)
209 return -EINVAL;
211 switch (sensor) {
212 case AMDGPU_PP_SENSOR_GFX_MCLK:
213 ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
214 *size = 4;
215 break;
216 case AMDGPU_PP_SENSOR_GFX_SCLK:
217 ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
218 *size = 4;
219 break;
220 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
221 *(uint32_t *)data = 0;
222 *size = 4;
223 break;
224 default:
225 ret = smu_common_read_sensor(smu, sensor, data, size);
226 break;
229 if (ret)
230 *size = 0;
232 return ret;
/**
 * smu_v12_0_get_gfxoff_status - get gfxoff status
 *
 * @smu: smu context pointer
 *
 * This function will be used to get gfxoff status
 *
 * Returns 0=GFXOFF(default).
 * Returns 1=Transition out of GFX State.
 * Returns 2=Not in GFXOFF.
 * Returns 3=Transition into GFXOFF.
 */
247 uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
249 uint32_t reg;
250 uint32_t gfxOff_Status = 0;
251 struct amdgpu_device *adev = smu->adev;
253 reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
254 gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
255 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
257 return gfxOff_Status;
260 int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
262 int ret = 0, timeout = 500;
264 if (enable) {
265 ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
267 } else {
268 ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
270 /* confirm gfx is back to "on" state, timeout is 0.5 second */
271 while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
272 msleep(1);
273 timeout--;
274 if (timeout == 0) {
275 DRM_ERROR("disable gfxoff timeout and failed!\n");
276 break;
281 return ret;
284 int smu_v12_0_init_smc_tables(struct smu_context *smu)
286 struct smu_table_context *smu_table = &smu->smu_table;
287 struct smu_table *tables = NULL;
289 if (smu_table->tables)
290 return -EINVAL;
292 tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
293 GFP_KERNEL);
294 if (!tables)
295 return -ENOMEM;
297 smu_table->tables = tables;
299 return smu_tables_init(smu, tables);
302 int smu_v12_0_fini_smc_tables(struct smu_context *smu)
304 struct smu_table_context *smu_table = &smu->smu_table;
306 if (!smu_table->tables)
307 return -EINVAL;
309 kfree(smu_table->clocks_table);
310 kfree(smu_table->tables);
312 smu_table->clocks_table = NULL;
313 smu_table->tables = NULL;
315 return 0;
318 int smu_v12_0_populate_smc_tables(struct smu_context *smu)
320 struct smu_table_context *smu_table = &smu->smu_table;
322 return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
325 int smu_v12_0_get_enabled_mask(struct smu_context *smu,
326 uint32_t *feature_mask, uint32_t num)
328 uint32_t feature_mask_high = 0, feature_mask_low = 0;
329 int ret = 0;
331 if (!feature_mask || num < 2)
332 return -EINVAL;
334 ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
335 if (ret)
336 return ret;
337 ret = smu_read_smc_arg(smu, &feature_mask_high);
338 if (ret)
339 return ret;
341 ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
342 if (ret)
343 return ret;
344 ret = smu_read_smc_arg(smu, &feature_mask_low);
345 if (ret)
346 return ret;
348 feature_mask[0] = feature_mask_low;
349 feature_mask[1] = feature_mask_high;
351 return ret;
354 int smu_v12_0_get_current_clk_freq(struct smu_context *smu,
355 enum smu_clk_type clk_id,
356 uint32_t *value)
358 int ret = 0;
359 uint32_t freq = 0;
361 if (clk_id >= SMU_CLK_COUNT || !value)
362 return -EINVAL;
364 ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
365 if (ret)
366 return ret;
368 freq *= 100;
369 *value = freq;
371 return ret;
374 int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
375 uint32_t *min, uint32_t *max)
377 int ret = 0;
378 uint32_t mclk_mask, soc_mask;
380 if (max) {
381 ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
382 NULL,
383 &mclk_mask,
384 &soc_mask);
385 if (ret)
386 goto failed;
388 switch (clk_type) {
389 case SMU_GFXCLK:
390 case SMU_SCLK:
391 ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
392 if (ret) {
393 pr_err("Attempt to get max GX frequency from SMC Failed !\n");
394 goto failed;
396 ret = smu_read_smc_arg(smu, max);
397 if (ret)
398 goto failed;
399 break;
400 case SMU_UCLK:
401 case SMU_FCLK:
402 case SMU_MCLK:
403 ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
404 if (ret)
405 goto failed;
406 break;
407 case SMU_SOCCLK:
408 ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
409 if (ret)
410 goto failed;
411 break;
412 default:
413 ret = -EINVAL;
414 goto failed;
418 if (min) {
419 switch (clk_type) {
420 case SMU_GFXCLK:
421 case SMU_SCLK:
422 ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
423 if (ret) {
424 pr_err("Attempt to get min GX frequency from SMC Failed !\n");
425 goto failed;
427 ret = smu_read_smc_arg(smu, min);
428 if (ret)
429 goto failed;
430 break;
431 case SMU_UCLK:
432 case SMU_FCLK:
433 case SMU_MCLK:
434 ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
435 if (ret)
436 goto failed;
437 break;
438 case SMU_SOCCLK:
439 ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
440 if (ret)
441 goto failed;
442 break;
443 default:
444 ret = -EINVAL;
445 goto failed;
448 failed:
449 return ret;
452 int smu_v12_0_mode2_reset(struct smu_context *smu){
453 return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
456 int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
457 uint32_t min, uint32_t max)
459 int ret = 0;
461 if (max < min)
462 return -EINVAL;
464 switch (clk_type) {
465 case SMU_GFXCLK:
466 case SMU_SCLK:
467 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
468 if (ret)
469 return ret;
471 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
472 if (ret)
473 return ret;
474 break;
475 case SMU_FCLK:
476 case SMU_MCLK:
477 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
478 if (ret)
479 return ret;
481 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
482 if (ret)
483 return ret;
484 break;
485 case SMU_SOCCLK:
486 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
487 if (ret)
488 return ret;
490 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
491 if (ret)
492 return ret;
493 break;
494 case SMU_VCLK:
495 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
496 if (ret)
497 return ret;
499 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
500 if (ret)
501 return ret;
502 break;
503 default:
504 return -EINVAL;
507 return ret;
510 int smu_v12_0_set_driver_table_location(struct smu_context *smu)
512 struct smu_table *driver_table = &smu->smu_table.driver_table;
513 int ret = 0;
515 if (driver_table->mc_address) {
516 ret = smu_send_smc_msg_with_param(smu,
517 SMU_MSG_SetDriverDramAddrHigh,
518 upper_32_bits(driver_table->mc_address));
519 if (!ret)
520 ret = smu_send_smc_msg_with_param(smu,
521 SMU_MSG_SetDriverDramAddrLow,
522 lower_32_bits(driver_table->mc_address));
525 return ret;