drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include "pp_debug.h"
24 #include <linux/delay.h>
25 #include <linux/fb.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <asm/div64.h>
30 #include <drm/amdgpu_drm.h>
31 #include "ppatomctrl.h"
32 #include "atombios.h"
33 #include "pptable_v1_0.h"
34 #include "pppcielanes.h"
35 #include "amd_pcie_helpers.h"
36 #include "hardwaremanager.h"
37 #include "process_pptables_v1_0.h"
38 #include "cgs_common.h"
40 #include "smu7_common.h"
42 #include "hwmgr.h"
43 #include "smu7_hwmgr.h"
44 #include "smu_ucode_xfer_vi.h"
45 #include "smu7_powertune.h"
46 #include "smu7_dyn_defaults.h"
47 #include "smu7_thermal.h"
48 #include "smu7_clockpowergating.h"
49 #include "processpptables.h"
50 #include "pp_thermal.h"
51 #include "smu7_baco.h"
53 #include "ivsrcid/ivsrcid_vislands30.h"
55 #define MC_CG_ARB_FREQ_F0 0x0a
56 #define MC_CG_ARB_FREQ_F1 0x0b
57 #define MC_CG_ARB_FREQ_F2 0x0c
58 #define MC_CG_ARB_FREQ_F3 0x0d
60 #define MC_CG_SEQ_DRAMCONF_S0 0x05
61 #define MC_CG_SEQ_DRAMCONF_S1 0x06
62 #define MC_CG_SEQ_YCLK_SUSPEND 0x04
63 #define MC_CG_SEQ_YCLK_RESUME 0x0a
65 #define SMC_CG_IND_START 0xc0030000
66 #define SMC_CG_IND_END 0xc0040000
68 #define MEM_FREQ_LOW_LATENCY 25000
69 #define MEM_FREQ_HIGH_LATENCY 80000
71 #define MEM_LATENCY_HIGH 45
72 #define MEM_LATENCY_LOW 35
73 #define MEM_LATENCY_ERR 0xFFFF
75 #define MC_SEQ_MISC0_GDDR5_SHIFT 28
76 #define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
77 #define MC_SEQ_MISC0_GDDR5_VALUE 5
79 #define PCIE_BUS_CLK 10000
80 #define TCLK (PCIE_BUS_CLK / 10)
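/* Each row below appears to follow struct profile_mode_setting (assumption,
 * based on how smu7_init_dpm_defaults fills the same fields):
 *   { bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity,
 *     bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity }
 * with one entry per PP_SMC_POWER_PROFILE_* mode, entry 0 being the
 * bootup default.
 */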
82 static struct profile_mode_setting smu7_profiling[7] =
83 {{0, 0, 0, 0, 0, 0, 0, 0},
84 {1, 0, 100, 30, 1, 0, 100, 10},
85 {1, 10, 0, 30, 0, 0, 0, 0},
86 {0, 0, 0, 0, 1, 10, 16, 31},
87 {1, 0, 11, 50, 1, 0, 100, 10},
88 {1, 0, 5, 30, 0, 0, 0, 0},
89 {0, 0, 0, 0, 0, 0, 0, 0},
92 #define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310)
94 #define ixPWR_SVI2_PLANE1_LOAD 0xC0200280
95 #define PWR_SVI2_PLANE1_LOAD__PSI1_MASK 0x00000020L
96 #define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK 0x00000040L
97 #define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT 0x00000005
98 #define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT 0x00000006
100 /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
101 enum DPM_EVENT_SRC {
102 DPM_EVENT_SRC_ANALOG = 0,
103 DPM_EVENT_SRC_EXTERNAL = 1,
104 DPM_EVENT_SRC_DIGITAL = 2,
105 DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
106 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
109 static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
110 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
111 enum pp_clock_type type, uint32_t mask);
113 static struct smu7_power_state *cast_phw_smu7_power_state(
114 struct pp_hw_power_state *hw_ps)
116 PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
117 "Invalid Powerstate Type!",
118 return NULL);
120 return (struct smu7_power_state *)hw_ps;
123 static const struct smu7_power_state *cast_const_phw_smu7_power_state(
124 const struct pp_hw_power_state *hw_ps)
126 PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
127 "Invalid Powerstate Type!",
128 return NULL);
130 return (const struct smu7_power_state *)hw_ps;
134 * Find the MC microcode version and store it in the HwMgr struct
136 * @param hwmgr the address of the powerplay hardware manager.
137 * @return always 0
139 static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
141 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
143 hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
145 return 0;
148 static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
150 uint32_t speedCntl = 0;
152 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
153 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
154 ixPCIE_LC_SPEED_CNTL);
155 return((uint16_t)PHM_GET_FIELD(speedCntl,
156 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
159 static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
161 uint32_t link_width;
163 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
164 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
165 PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
167 PP_ASSERT_WITH_CODE((7 >= link_width),
168 "Invalid PCIe lane width!", return 0);
170 return decode_pcie_lane_width(link_width);
174 * Enable voltage control
176  * @param hwmgr the address of the powerplay hardware manager.
177  * @return always 0
179 static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
181 if (hwmgr->chip_id == CHIP_VEGAM) {
182 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
183 CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
184 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
185 CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
188 if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
189 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
191 return 0;
195 * Checks if we want to support voltage control
197 * @param hwmgr the address of the powerplay hardware manager.
199 static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
201 const struct smu7_hwmgr *data =
202 (const struct smu7_hwmgr *)(hwmgr->backend);
204 return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
208 * Enable voltage control
210 * @param hwmgr the address of the powerplay hardware manager.
211 * @return always 0
213 static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
215 /* enable voltage control */
216 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
217 GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
219 return 0;
222 static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
223 struct phm_clock_voltage_dependency_table *voltage_dependency_table
226 uint32_t i;
228 PP_ASSERT_WITH_CODE((NULL != voltage_table),
229 "Voltage Dependency Table empty.", return -EINVAL;);
231 voltage_table->mask_low = 0;
232 voltage_table->phase_delay = 0;
233 voltage_table->count = voltage_dependency_table->count;
235 for (i = 0; i < voltage_dependency_table->count; i++) {
236 voltage_table->entries[i].value =
237 voltage_dependency_table->entries[i].v;
238 voltage_table->entries[i].smio_low = 0;
241 return 0;
246 * Create Voltage Tables.
248 * @param hwmgr the address of the powerplay hardware manager.
249 * @return always 0
251 static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
253 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
254 struct phm_ppt_v1_information *table_info =
255 (struct phm_ppt_v1_information *)hwmgr->pptable;
256 int result = 0;
257 uint32_t tmp;
259 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
260 result = atomctrl_get_voltage_table_v3(hwmgr,
261 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
262 &(data->mvdd_voltage_table));
263 PP_ASSERT_WITH_CODE((0 == result),
264 "Failed to retrieve MVDD table.",
265 return result);
266 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
267 if (hwmgr->pp_table_version == PP_TABLE_V1)
268 result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
269 table_info->vdd_dep_on_mclk);
270 else if (hwmgr->pp_table_version == PP_TABLE_V0)
271 result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
272 hwmgr->dyn_state.mvdd_dependency_on_mclk);
274 PP_ASSERT_WITH_CODE((0 == result),
275 "Failed to retrieve SVI2 MVDD table from dependency table.",
276 return result;);
279 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
280 result = atomctrl_get_voltage_table_v3(hwmgr,
281 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
282 &(data->vddci_voltage_table));
283 PP_ASSERT_WITH_CODE((0 == result),
284 "Failed to retrieve VDDCI table.",
285 return result);
286 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
287 if (hwmgr->pp_table_version == PP_TABLE_V1)
288 result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
289 table_info->vdd_dep_on_mclk);
290 else if (hwmgr->pp_table_version == PP_TABLE_V0)
291 result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
292 hwmgr->dyn_state.vddci_dependency_on_mclk);
293 PP_ASSERT_WITH_CODE((0 == result),
294 "Failed to retrieve SVI2 VDDCI table from dependency table.",
295 return result);
298 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
299 /* VDDGFX has only SVI2 voltage control */
300 result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
301 table_info->vddgfx_lookup_table);
302 PP_ASSERT_WITH_CODE((0 == result),
303 "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
307 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
308 result = atomctrl_get_voltage_table_v3(hwmgr,
309 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
310 &data->vddc_voltage_table);
311 PP_ASSERT_WITH_CODE((0 == result),
312 "Failed to retrieve VDDC table.", return result;);
313 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
315 if (hwmgr->pp_table_version == PP_TABLE_V0)
316 result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
317 hwmgr->dyn_state.vddc_dependency_on_mclk);
318 else if (hwmgr->pp_table_version == PP_TABLE_V1)
319 result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
320 table_info->vddc_lookup_table);
322 PP_ASSERT_WITH_CODE((0 == result),
323 "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
326 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
327 PP_ASSERT_WITH_CODE(
328 (data->vddc_voltage_table.count <= tmp),
329 "Too many voltage values for VDDC. Trimming to fit state table.",
330 phm_trim_voltage_table_to_fit_state_table(tmp,
331 &(data->vddc_voltage_table)));
333 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
334 PP_ASSERT_WITH_CODE(
335 (data->vddgfx_voltage_table.count <= tmp),
336 "Too many voltage values for VDDGFX. Trimming to fit state table.",
337 phm_trim_voltage_table_to_fit_state_table(tmp,
338 &(data->vddgfx_voltage_table)));
340 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
341 PP_ASSERT_WITH_CODE(
342 (data->vddci_voltage_table.count <= tmp),
343 "Too many voltage values for VDDCI. Trimming to fit state table.",
344 phm_trim_voltage_table_to_fit_state_table(tmp,
345 &(data->vddci_voltage_table)));
347 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
348 PP_ASSERT_WITH_CODE(
349 (data->mvdd_voltage_table.count <= tmp),
350 "Too many voltage values for MVDD. Trimming to fit state table.",
351 phm_trim_voltage_table_to_fit_state_table(tmp,
352 &(data->mvdd_voltage_table)));
354 return 0;
358 * Programs static screen detection parameters
360 * @param hwmgr the address of the powerplay hardware manager.
361 * @return always 0
363 static int smu7_program_static_screen_threshold_parameters(
364 struct pp_hwmgr *hwmgr)
366 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
368 /* Set static screen threshold unit */
369 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
370 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
371 data->static_screen_threshold_unit);
372 /* Set static screen threshold */
373 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
374 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
375 data->static_screen_threshold);
377 return 0;
381 * Setup display gap for glitch free memory clock switching.
383 * @param hwmgr the address of the powerplay hardware manager.
384 * @return always 0
386 static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
388 uint32_t display_gap =
389 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
390 ixCG_DISPLAY_GAP_CNTL);
392 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
393 DISP_GAP, DISPLAY_GAP_IGNORE);
395 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
396 DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
398 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
399 ixCG_DISPLAY_GAP_CNTL, display_gap);
401 return 0;
405 * Programs activity state transition voting clients
407 * @param hwmgr the address of the powerplay hardware manager.
408 * @return always 0
410 static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
412 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
413 int i;
415 /* Clear reset for voting clients before enabling DPM */
416 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
417 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
418 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
419 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
421 for (i = 0; i < 8; i++)
422 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
423 ixCG_FREQ_TRAN_VOTING_0 + i * 4,
424 data->voting_rights_clients[i]);
425 return 0;
428 static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
430 int i;
432 /* Reset voting clients before disabling DPM */
433 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
434 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
435 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
436 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
438 for (i = 0; i < 8; i++)
439 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
440 ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
442 return 0;
445 /* Copy one arb setting to another and then switch the active set.
446 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
448 static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
449 uint32_t arb_src, uint32_t arb_dest)
451 uint32_t mc_arb_dram_timing;
452 uint32_t mc_arb_dram_timing2;
453 uint32_t burst_time;
454 uint32_t mc_cg_config;
456 switch (arb_src) {
457 case MC_CG_ARB_FREQ_F0:
458 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
459 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
460 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
461 break;
462 case MC_CG_ARB_FREQ_F1:
463 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
464 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
465 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
466 break;
467 default:
468 return -EINVAL;
471 switch (arb_dest) {
472 case MC_CG_ARB_FREQ_F0:
473 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
474 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
475 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
476 break;
477 case MC_CG_ARB_FREQ_F1:
478 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
479 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
480 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
481 break;
482 default:
483 return -EINVAL;
486 mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
487 mc_cg_config |= 0x0000000F;
488 cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
489 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
491 return 0;
494 static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
496 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
500 * Initial switch from ARB F0->F1
502 * @param hwmgr the address of the powerplay hardware manager.
503 * @return always 0
504 * This function is to be called from the SetPowerState table.
506 static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
508 return smu7_copy_and_switch_arb_sets(hwmgr,
509 MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
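/* Sketch of the intent (assumption from the mask/shift below): the currently
 * active ARB set is read back from bits 15:8 of SMC_SCRATCH9, and the switch
 * back to F0 is issued only if another set is active.
 */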
512 static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
514 uint32_t tmp;
516 tmp = (cgs_read_ind_register(hwmgr->device,
517 CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
518 0x0000ff00) >> 8;
520 if (tmp == MC_CG_ARB_FREQ_F0)
521 return 0;
523 return smu7_copy_and_switch_arb_sets(hwmgr,
524 tmp, MC_CG_ARB_FREQ_F0);
527 static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
529 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
531 struct phm_ppt_v1_information *table_info =
532 (struct phm_ppt_v1_information *)(hwmgr->pptable);
533 struct phm_ppt_v1_pcie_table *pcie_table = NULL;
535 uint32_t i, max_entry;
536 uint32_t tmp;
538 PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
539 data->use_pcie_power_saving_levels), "No pcie performance levels!",
540 return -EINVAL);
542 if (table_info != NULL)
543 pcie_table = table_info->pcie_table;
545 if (data->use_pcie_performance_levels &&
546 !data->use_pcie_power_saving_levels) {
547 data->pcie_gen_power_saving = data->pcie_gen_performance;
548 data->pcie_lane_power_saving = data->pcie_lane_performance;
549 } else if (!data->use_pcie_performance_levels &&
550 data->use_pcie_power_saving_levels) {
551 data->pcie_gen_performance = data->pcie_gen_power_saving;
552 data->pcie_lane_performance = data->pcie_lane_power_saving;
554 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
555 phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
556 tmp,
557 MAX_REGULAR_DPM_NUMBER);
559 if (pcie_table != NULL) {
560 /* max_entry is used to make sure we reserve one PCIE level
561 * for boot level (fix for A+A PSPP issue).
562 * If the PCIE table from the PPTable has a ULV entry + 8 entries,
563 * then ignore the last entry. */
564 max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
565 for (i = 1; i < max_entry; i++) {
566 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
567 get_pcie_gen_support(data->pcie_gen_cap,
568 pcie_table->entries[i].gen_speed),
569 get_pcie_lane_support(data->pcie_lane_cap,
570 pcie_table->entries[i].lane_width));
572 data->dpm_table.pcie_speed_table.count = max_entry - 1;
573 smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
574 } else {
575 /* Hardcode Pcie Table */
576 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
577 get_pcie_gen_support(data->pcie_gen_cap,
578 PP_Min_PCIEGen),
579 get_pcie_lane_support(data->pcie_lane_cap,
580 PP_Max_PCIELane));
581 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
582 get_pcie_gen_support(data->pcie_gen_cap,
583 PP_Min_PCIEGen),
584 get_pcie_lane_support(data->pcie_lane_cap,
585 PP_Max_PCIELane));
586 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
587 get_pcie_gen_support(data->pcie_gen_cap,
588 PP_Max_PCIEGen),
589 get_pcie_lane_support(data->pcie_lane_cap,
590 PP_Max_PCIELane));
591 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
592 get_pcie_gen_support(data->pcie_gen_cap,
593 PP_Max_PCIEGen),
594 get_pcie_lane_support(data->pcie_lane_cap,
595 PP_Max_PCIELane));
596 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
597 get_pcie_gen_support(data->pcie_gen_cap,
598 PP_Max_PCIEGen),
599 get_pcie_lane_support(data->pcie_lane_cap,
600 PP_Max_PCIELane));
601 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
602 get_pcie_gen_support(data->pcie_gen_cap,
603 PP_Max_PCIEGen),
604 get_pcie_lane_support(data->pcie_lane_cap,
605 PP_Max_PCIELane));
607 data->dpm_table.pcie_speed_table.count = 6;
609 /* Populate last level for boot PCIE level, but do not increment count. */
610 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
611 for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
612 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
613 get_pcie_gen_support(data->pcie_gen_cap,
614 PP_Max_PCIEGen),
615 data->vbios_boot_state.pcie_lane_bootup_value);
616 } else {
617 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
618 data->dpm_table.pcie_speed_table.count,
619 get_pcie_gen_support(data->pcie_gen_cap,
620 PP_Min_PCIEGen),
621 get_pcie_lane_support(data->pcie_lane_cap,
622 PP_Max_PCIELane));
624 return 0;
627 static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
629 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
631 memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
633 phm_reset_single_dpm_table(
634 &data->dpm_table.sclk_table,
635 smum_get_mac_definition(hwmgr,
636 SMU_MAX_LEVELS_GRAPHICS),
637 MAX_REGULAR_DPM_NUMBER);
638 phm_reset_single_dpm_table(
639 &data->dpm_table.mclk_table,
640 smum_get_mac_definition(hwmgr,
641 SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
643 phm_reset_single_dpm_table(
644 &data->dpm_table.vddc_table,
645 smum_get_mac_definition(hwmgr,
646 SMU_MAX_LEVELS_VDDC),
647 MAX_REGULAR_DPM_NUMBER);
648 phm_reset_single_dpm_table(
649 &data->dpm_table.vddci_table,
650 smum_get_mac_definition(hwmgr,
651 SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
653 phm_reset_single_dpm_table(
654 &data->dpm_table.mvdd_table,
655 smum_get_mac_definition(hwmgr,
656 SMU_MAX_LEVELS_MVDD),
657 MAX_REGULAR_DPM_NUMBER);
658 return 0;
661 * This function initializes all DPM state tables
662 * for SMU7 based on the dependency table.
663 * The dynamic state patching function will then trim these
664 * state tables to the allowed range based
665 * on the power policy or external client requests,
666 * such as UVD request, etc.
669 static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
671 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
672 struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
673 hwmgr->dyn_state.vddc_dependency_on_sclk;
674 struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
675 hwmgr->dyn_state.vddc_dependency_on_mclk;
676 struct phm_cac_leakage_table *std_voltage_table =
677 hwmgr->dyn_state.cac_leakage_table;
678 uint32_t i;
680 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
681 "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
682 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
683 "SCLK dependency table has to have at least one entry. This table is mandatory", return -EINVAL);
685 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
686 "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
687 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
688 "MCLK dependency table has to have at least one entry. This table is mandatory", return -EINVAL);
691 /* Initialize Sclk DPM table based on allowed Sclk values */
692 data->dpm_table.sclk_table.count = 0;
694 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
695 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
696 allowed_vdd_sclk_table->entries[i].clk) {
697 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
698 allowed_vdd_sclk_table->entries[i].clk;
699 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
700 data->dpm_table.sclk_table.count++;
704 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
705 "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
706 /* Initialize Mclk DPM table based on allowed Mclk values */
707 data->dpm_table.mclk_table.count = 0;
708 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
709 if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
710 allowed_vdd_mclk_table->entries[i].clk) {
711 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
712 allowed_vdd_mclk_table->entries[i].clk;
713 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
714 data->dpm_table.mclk_table.count++;
718 /* Initialize Vddc DPM table based on allowed Vddc values, and populate corresponding std values. */
719 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
720 data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
721 data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
722 /* param1 is for corresponding std voltage */
723 data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
726 data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
727 allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
729 if (NULL != allowed_vdd_mclk_table) {
730 /* Initialize Vddci DPM table based on allowed Mclk values */
731 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
732 data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
733 data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
735 data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
738 allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
740 if (NULL != allowed_vdd_mclk_table) {
742 * Initialize MVDD DPM table based on allowed Mclk
743 * values
745 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
746 data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
747 data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
749 data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
752 return 0;
755 static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
757 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
758 struct phm_ppt_v1_information *table_info =
759 (struct phm_ppt_v1_information *)(hwmgr->pptable);
760 uint32_t i;
762 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
763 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
765 if (table_info == NULL)
766 return -EINVAL;
768 dep_sclk_table = table_info->vdd_dep_on_sclk;
769 dep_mclk_table = table_info->vdd_dep_on_mclk;
771 PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
772 "SCLK dependency table is missing.",
773 return -EINVAL);
774 PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
775 "SCLK dependency table count is 0.",
776 return -EINVAL);
778 PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
779 "MCLK dependency table is missing.",
780 return -EINVAL);
781 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
782 "MCLK dependency table count is 0",
783 return -EINVAL);
785 /* Initialize Sclk DPM table based on allowed Sclk values */
786 data->dpm_table.sclk_table.count = 0;
787 for (i = 0; i < dep_sclk_table->count; i++) {
788 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
789 dep_sclk_table->entries[i].clk) {
791 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
792 dep_sclk_table->entries[i].clk;
794 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
795 (i == 0) ? true : false;
796 data->dpm_table.sclk_table.count++;
799 if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
800 hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
801 /* Initialize Mclk DPM table based on allowed Mclk values */
802 data->dpm_table.mclk_table.count = 0;
803 for (i = 0; i < dep_mclk_table->count; i++) {
804 if (i == 0 || data->dpm_table.mclk_table.dpm_levels
805 [data->dpm_table.mclk_table.count - 1].value !=
806 dep_mclk_table->entries[i].clk) {
807 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
808 dep_mclk_table->entries[i].clk;
809 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
810 (i == 0) ? true : false;
811 data->dpm_table.mclk_table.count++;
815 if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
816 hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
817 return 0;
820 static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
822 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
823 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
824 struct phm_ppt_v1_information *table_info =
825 (struct phm_ppt_v1_information *)(hwmgr->pptable);
826 uint32_t i;
828 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
829 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
830 struct phm_odn_performance_level *entries;
832 if (table_info == NULL)
833 return -EINVAL;
835 dep_sclk_table = table_info->vdd_dep_on_sclk;
836 dep_mclk_table = table_info->vdd_dep_on_mclk;
838 odn_table->odn_core_clock_dpm_levels.num_of_pl =
839 data->golden_dpm_table.sclk_table.count;
840 entries = odn_table->odn_core_clock_dpm_levels.entries;
841 for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
842 entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
843 entries[i].enabled = true;
844 entries[i].vddc = dep_sclk_table->entries[i].vddc;
847 smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
848 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
850 odn_table->odn_memory_clock_dpm_levels.num_of_pl =
851 data->golden_dpm_table.mclk_table.count;
852 entries = odn_table->odn_memory_clock_dpm_levels.entries;
853 for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
854 entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
855 entries[i].enabled = true;
856 entries[i].vddc = dep_mclk_table->entries[i].vddc;
859 smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
860 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
862 return 0;
865 static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
867 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
868 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
869 struct phm_ppt_v1_information *table_info =
870 (struct phm_ppt_v1_information *)(hwmgr->pptable);
871 uint32_t min_vddc = 0;
872 uint32_t max_vddc = 0;
874 if (!table_info)
875 return;
877 dep_sclk_table = table_info->vdd_dep_on_sclk;
879 atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
881 if (min_vddc == 0 || min_vddc > 2000
882 || min_vddc > dep_sclk_table->entries[0].vddc)
883 min_vddc = dep_sclk_table->entries[0].vddc;
885 if (max_vddc == 0 || max_vddc > 2000
886 || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
887 max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
889 data->odn_dpm_table.min_vddc = min_vddc;
890 data->odn_dpm_table.max_vddc = max_vddc;
893 static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
895 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
896 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
897 struct phm_ppt_v1_information *table_info =
898 (struct phm_ppt_v1_information *)(hwmgr->pptable);
899 uint32_t i;
901 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
902 struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
904 if (table_info == NULL)
905 return;
907 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
908 if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
909 data->dpm_table.sclk_table.dpm_levels[i].value) {
910 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
911 break;
915 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
916 if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
917 data->dpm_table.mclk_table.dpm_levels[i].value) {
918 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
919 break;
923 dep_table = table_info->vdd_dep_on_mclk;
924 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
926 for (i = 0; i < dep_table->count; i++) {
927 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
928 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
929 return;
933 dep_table = table_info->vdd_dep_on_sclk;
934 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
935 for (i = 0; i < dep_table->count; i++) {
936 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
937 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
938 return;
941 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
942 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
943 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
947 static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
949 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
951 smu7_reset_dpm_tables(hwmgr);
953 if (hwmgr->pp_table_version == PP_TABLE_V1)
954 smu7_setup_dpm_tables_v1(hwmgr);
955 else if (hwmgr->pp_table_version == PP_TABLE_V0)
956 smu7_setup_dpm_tables_v0(hwmgr);
958 smu7_setup_default_pcie_table(hwmgr);
960 /* save a copy of the default DPM table */
961 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
962 sizeof(struct smu7_dpm_table));
964 /* initialize ODN table */
965 if (hwmgr->od_enabled) {
966 if (data->odn_dpm_table.max_vddc) {
967 smu7_check_dpm_table_updated(hwmgr);
968 } else {
969 smu7_setup_voltage_range_from_vbios(hwmgr);
970 smu7_odn_initial_default_setting(hwmgr);
973 return 0;
976 static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
979 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
980 PHM_PlatformCaps_RegulatorHot))
981 return smum_send_msg_to_smc(hwmgr,
982 PPSMC_MSG_EnableVRHotGPIOInterrupt);
984 return 0;
987 static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
989 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
990 SCLK_PWRMGT_OFF, 0);
991 return 0;
994 static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
996 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
998 if (data->ulv_supported)
999 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
1001 return 0;
1004 static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
1006 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1008 if (data->ulv_supported)
1009 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
1011 return 0;
1014 static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1016 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1017 PHM_PlatformCaps_SclkDeepSleep)) {
1018 if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
1019 PP_ASSERT_WITH_CODE(false,
1020 "Attempt to enable Master Deep Sleep switch failed!",
1021 return -EINVAL);
1022 } else {
1023 if (smum_send_msg_to_smc(hwmgr,
1024 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
1025 PP_ASSERT_WITH_CODE(false,
1026 "Attempt to disable Master Deep Sleep switch failed!",
1027 return -EINVAL);
1031 return 0;
1034 static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1036 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1037 PHM_PlatformCaps_SclkDeepSleep)) {
1038 if (smum_send_msg_to_smc(hwmgr,
1039 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
1040 PP_ASSERT_WITH_CODE(false,
1041 "Attempt to disable Master Deep Sleep switch failed!",
1042 return -EINVAL);
1046 return 0;
1049 static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
1051 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1052 uint32_t soft_register_value = 0;
1053 uint32_t handshake_disables_offset = data->soft_regs_start
1054 + smum_get_offsetof(hwmgr,
1055 SMU_SoftRegisters, HandshakeDisables);
1057 soft_register_value = cgs_read_ind_register(hwmgr->device,
1058 CGS_IND_REG__SMC, handshake_disables_offset);
1059 soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
1060 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1061 handshake_disables_offset, soft_register_value);
1062 return 0;
1065 static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
1067 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1068 uint32_t soft_register_value = 0;
1069 uint32_t handshake_disables_offset = data->soft_regs_start
1070 + smum_get_offsetof(hwmgr,
1071 SMU_SoftRegisters, HandshakeDisables);
1073 soft_register_value = cgs_read_ind_register(hwmgr->device,
1074 CGS_IND_REG__SMC, handshake_disables_offset);
1075 soft_register_value |= smum_get_mac_definition(hwmgr,
1076 SMU_UVD_MCLK_HANDSHAKE_DISABLE);
1077 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1078 handshake_disables_offset, soft_register_value);
1079 return 0;
1082 static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1084 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1086 /* enable SCLK dpm */
1087 if (!data->sclk_dpm_key_disabled) {
1088 if (hwmgr->chip_id == CHIP_VEGAM)
1089 smu7_disable_sclk_vce_handshake(hwmgr);
1091 PP_ASSERT_WITH_CODE(
1092 (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
1093 "Failed to enable SCLK DPM during DPM Start Function!",
1094 return -EINVAL);
1097 /* enable MCLK dpm */
1098 if (0 == data->mclk_dpm_key_disabled) {
1099 if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
1100 smu7_disable_handshake_uvd(hwmgr);
1102 PP_ASSERT_WITH_CODE(
1103 (0 == smum_send_msg_to_smc(hwmgr,
1104 PPSMC_MSG_MCLKDPM_Enable)),
1105 "Failed to enable MCLK DPM during DPM Start Function!",
1106 return -EINVAL);
1108 if (hwmgr->chip_family != CHIP_VEGAM)
1109 PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
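/* The raw 0xc0400dxx writes below are presumably the CI-family addresses of
 * the same LCAC MC0/MC1/CPL control registers programmed via ixLCAC_* in the
 * else branch (assumption; the ix* defines are only used on the VI path).
 */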
1112 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1113 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
1114 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
1115 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
1116 udelay(10);
1117 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
1118 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
1119 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
1120 } else {
1121 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
1122 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
1123 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
1124 udelay(10);
1125 if (hwmgr->chip_id == CHIP_VEGAM) {
1126 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
1127 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
1128 } else {
1129 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
1130 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
1132 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
1136 return 0;
1139 static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
1141 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1143 /*enable general power management */
1145 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1146 GLOBAL_PWRMGT_EN, 1);
1148 /* enable sclk deep sleep */
1150 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1151 DYNAMIC_PM_EN, 1);
1153 /* prepare for PCIE DPM */
1155 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1156 data->soft_regs_start +
1157 smum_get_offsetof(hwmgr, SMU_SoftRegisters,
1158 VoltageChangeTimeout), 0x1000);
1159 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
1160 SWRST_COMMAND_1, RESETLC, 0x0);
1162 if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
1163 cgs_write_register(hwmgr->device, 0x1488,
1164 (cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
1166 if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
1167 pr_err("Failed to enable Sclk DPM and Mclk DPM!");
1168 return -EINVAL;
1171 /* enable PCIE dpm */
1172 if (0 == data->pcie_dpm_key_disabled) {
1173 PP_ASSERT_WITH_CODE(
1174 (0 == smum_send_msg_to_smc(hwmgr,
1175 PPSMC_MSG_PCIeDPM_Enable)),
1176 "Failed to enable pcie DPM during DPM Start Function!",
1177 return -EINVAL);
1180 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1181 PHM_PlatformCaps_Falcon_QuickTransition)) {
1182 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
1183 PPSMC_MSG_EnableACDCGPIOInterrupt)),
1184 "Failed to enable AC DC GPIO Interrupt!",
1188 return 0;
1191 static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1193 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1195 /* disable SCLK dpm */
1196 if (!data->sclk_dpm_key_disabled) {
1197 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1198 "Trying to disable SCLK DPM when DPM is disabled",
1199 return 0);
1200 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
1203 /* disable MCLK dpm */
1204 if (!data->mclk_dpm_key_disabled) {
1205 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1206 "Trying to disable MCLK DPM when DPM is disabled",
1207 return 0);
1208 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
1211 return 0;
1214 static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
1216 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1218 /* disable general power management */
1219 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1220 GLOBAL_PWRMGT_EN, 0);
1221 /* disable sclk deep sleep */
1222 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1223 DYNAMIC_PM_EN, 0);
1225 /* disable PCIE dpm */
1226 if (!data->pcie_dpm_key_disabled) {
1227 PP_ASSERT_WITH_CODE(
1228 (smum_send_msg_to_smc(hwmgr,
1229 PPSMC_MSG_PCIeDPM_Disable) == 0),
1230 "Failed to disable pcie DPM during DPM Stop Function!",
1231 return -EINVAL);
1234 smu7_disable_sclk_mclk_dpm(hwmgr);
1236 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1237 "Trying to disable voltage DPM when DPM is disabled",
1238 return 0);
1240 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);
1242 return 0;
1245 static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1247 bool protection;
1248 enum DPM_EVENT_SRC src;
1250 switch (sources) {
1251 default:
1252 pr_err("Unknown throttling event sources.");
1253 /* fall through */
1254 case 0:
1255 protection = false;
1256 /* src is unused */
1257 break;
1258 case (1 << PHM_AutoThrottleSource_Thermal):
1259 protection = true;
1260 src = DPM_EVENT_SRC_DIGITAL;
1261 break;
1262 case (1 << PHM_AutoThrottleSource_External):
1263 protection = true;
1264 src = DPM_EVENT_SRC_EXTERNAL;
1265 break;
1266 case (1 << PHM_AutoThrottleSource_External) |
1267 (1 << PHM_AutoThrottleSource_Thermal):
1268 protection = true;
1269 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1270 break;
1272 /* Order matters - don't enable thermal protection for the wrong source. */
1273 if (protection) {
1274 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1275 DPM_EVENT_SRC, src);
1276 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1277 THERMAL_PROTECTION_DIS,
1278 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1279 PHM_PlatformCaps_ThermalController));
1280 } else
1281 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1282 THERMAL_PROTECTION_DIS, 1);
1285 static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1286 PHM_AutoThrottleSource source)
1288 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1290 if (!(data->active_auto_throttle_sources & (1 << source))) {
1291 data->active_auto_throttle_sources |= 1 << source;
1292 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1294 return 0;
1297 static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1299 return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1302 static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1303 PHM_AutoThrottleSource source)
1305 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1307 if (data->active_auto_throttle_sources & (1 << source)) {
1308 data->active_auto_throttle_sources &= ~(1 << source);
1309 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1311 return 0;
1314 static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1316 return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1319 static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1321 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1322 data->pcie_performance_request = true;
1324 return 0;
1327 static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1329 int tmp_result = 0;
1330 int result = 0;
1332 if (smu7_voltage_control(hwmgr)) {
1333 tmp_result = smu7_enable_voltage_control(hwmgr);
1334 PP_ASSERT_WITH_CODE(tmp_result == 0,
1335 "Failed to enable voltage control!",
1336 result = tmp_result);
1338 tmp_result = smu7_construct_voltage_tables(hwmgr);
1339 PP_ASSERT_WITH_CODE((0 == tmp_result),
1340 "Failed to construct voltage tables!",
1341 result = tmp_result);
1343 smum_initialize_mc_reg_table(hwmgr);
1345 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1346 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1347 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1348 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1350 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1351 PHM_PlatformCaps_ThermalController))
1352 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1353 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1355 tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1356 PP_ASSERT_WITH_CODE((0 == tmp_result),
1357 "Failed to program static screen threshold parameters!",
1358 result = tmp_result);
1360 tmp_result = smu7_enable_display_gap(hwmgr);
1361 PP_ASSERT_WITH_CODE((0 == tmp_result),
1362 "Failed to enable display gap!", result = tmp_result);
1364 tmp_result = smu7_program_voting_clients(hwmgr);
1365 PP_ASSERT_WITH_CODE((0 == tmp_result),
1366 "Failed to program voting clients!", result = tmp_result);
1368 tmp_result = smum_process_firmware_header(hwmgr);
1369 PP_ASSERT_WITH_CODE((0 == tmp_result),
1370 "Failed to process firmware header!", result = tmp_result);
1372 if (hwmgr->chip_id != CHIP_VEGAM) {
1373 tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1374 PP_ASSERT_WITH_CODE((0 == tmp_result),
1375 "Failed to initialize switch from ArbF0 to F1!",
1376 result = tmp_result);
1379 result = smu7_setup_default_dpm_tables(hwmgr);
1380 PP_ASSERT_WITH_CODE(0 == result,
1381 "Failed to setup default DPM tables!", return result);
1383 tmp_result = smum_init_smc_table(hwmgr);
1384 PP_ASSERT_WITH_CODE((0 == tmp_result),
1385 "Failed to initialize SMC table!", result = tmp_result);
1387 tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1388 PP_ASSERT_WITH_CODE((0 == tmp_result),
1389 "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1391 smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
1393 tmp_result = smu7_enable_sclk_control(hwmgr);
1394 PP_ASSERT_WITH_CODE((0 == tmp_result),
1395 "Failed to enable SCLK control!", result = tmp_result);
1397 tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1398 PP_ASSERT_WITH_CODE((0 == tmp_result),
1399 "Failed to enable voltage control!", result = tmp_result);
1401 tmp_result = smu7_enable_ulv(hwmgr);
1402 PP_ASSERT_WITH_CODE((0 == tmp_result),
1403 "Failed to enable ULV!", result = tmp_result);
1405 tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1406 PP_ASSERT_WITH_CODE((0 == tmp_result),
1407 "Failed to enable deep sleep master switch!", result = tmp_result);
1409 tmp_result = smu7_enable_didt_config(hwmgr);
1410 PP_ASSERT_WITH_CODE((tmp_result == 0),
1411 "Failed to enable DIDT config!", result = tmp_result);
1413 tmp_result = smu7_start_dpm(hwmgr);
1414 PP_ASSERT_WITH_CODE((0 == tmp_result),
1415 "Failed to start DPM!", result = tmp_result);
1417 tmp_result = smu7_enable_smc_cac(hwmgr);
1418 PP_ASSERT_WITH_CODE((0 == tmp_result),
1419 "Failed to enable SMC CAC!", result = tmp_result);
1421 tmp_result = smu7_enable_power_containment(hwmgr);
1422 PP_ASSERT_WITH_CODE((0 == tmp_result),
1423 "Failed to enable power containment!", result = tmp_result);
1425 tmp_result = smu7_power_control_set_level(hwmgr);
1426 PP_ASSERT_WITH_CODE((0 == tmp_result),
1427 "Failed to power control set level!", result = tmp_result);
1429 tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1430 PP_ASSERT_WITH_CODE((0 == tmp_result),
1431 "Failed to enable thermal auto throttle!", result = tmp_result);
1433 tmp_result = smu7_pcie_performance_request(hwmgr);
1434 PP_ASSERT_WITH_CODE((0 == tmp_result),
1435 "pcie performance request failed!", result = tmp_result);
1437 return 0;
1440 static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1442 if (!hwmgr->avfs_supported)
1443 return 0;
1445 if (enable) {
1446 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1447 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1448 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1449 hwmgr, PPSMC_MSG_EnableAvfs),
1450 "Failed to enable AVFS!",
1451 return -EINVAL);
1453 } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1454 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1455 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1456 hwmgr, PPSMC_MSG_DisableAvfs),
1457 "Failed to disable AVFS!",
1458 return -EINVAL);
1461 return 0;
1464 static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1466 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1468 if (!hwmgr->avfs_supported)
1469 return 0;
1471 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1472 smu7_avfs_control(hwmgr, false);
1473 } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1474 smu7_avfs_control(hwmgr, false);
1475 smu7_avfs_control(hwmgr, true);
1476 } else {
1477 smu7_avfs_control(hwmgr, true);
1480 return 0;
1483 int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1485 int tmp_result, result = 0;
1487 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1488 PHM_PlatformCaps_ThermalController))
1489 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1490 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
1492 tmp_result = smu7_disable_power_containment(hwmgr);
1493 PP_ASSERT_WITH_CODE((tmp_result == 0),
1494 "Failed to disable power containment!", result = tmp_result);
1496 tmp_result = smu7_disable_smc_cac(hwmgr);
1497 PP_ASSERT_WITH_CODE((tmp_result == 0),
1498 "Failed to disable SMC CAC!", result = tmp_result);
1500 tmp_result = smu7_disable_didt_config(hwmgr);
1501 PP_ASSERT_WITH_CODE((tmp_result == 0),
1502 "Failed to disable DIDT!", result = tmp_result);
1504 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1505 CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
1506 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1507 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
1509 tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
1510 PP_ASSERT_WITH_CODE((tmp_result == 0),
1511 "Failed to disable thermal auto throttle!", result = tmp_result);
1513 tmp_result = smu7_avfs_control(hwmgr, false);
1514 PP_ASSERT_WITH_CODE((tmp_result == 0),
1515 "Failed to disable AVFS!", result = tmp_result);
1517 tmp_result = smu7_stop_dpm(hwmgr);
1518 PP_ASSERT_WITH_CODE((tmp_result == 0),
1519 "Failed to stop DPM!", result = tmp_result);
1521 tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
1522 PP_ASSERT_WITH_CODE((tmp_result == 0),
1523 "Failed to disable deep sleep master switch!", result = tmp_result);
1525 tmp_result = smu7_disable_ulv(hwmgr);
1526 PP_ASSERT_WITH_CODE((tmp_result == 0),
1527 "Failed to disable ULV!", result = tmp_result);
1529 tmp_result = smu7_clear_voting_clients(hwmgr);
1530 PP_ASSERT_WITH_CODE((tmp_result == 0),
1531 "Failed to clear voting clients!", result = tmp_result);
1533 tmp_result = smu7_reset_to_default(hwmgr);
1534 PP_ASSERT_WITH_CODE((tmp_result == 0),
1535 "Failed to reset to default!", result = tmp_result);
1537 tmp_result = smu7_force_switch_to_arbf0(hwmgr);
1538 PP_ASSERT_WITH_CODE((tmp_result == 0),
1539 "Failed to force to switch arbf0!", result = tmp_result);
1541 return result;
1544 int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
1547 return 0;
1550 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1552 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1553 struct phm_ppt_v1_information *table_info =
1554 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1555 struct amdgpu_device *adev = hwmgr->adev;
1557 data->dll_default_on = false;
1558 data->mclk_dpm0_activity_target = 0xa;
1559 data->vddc_vddgfx_delta = 300;
1560 data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1561 data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
1562 data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
1563 data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
1564 data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
1565 data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
1566 data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
1567 data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
1568 data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
1569 data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
1571 data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
1572 data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
1573 data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
1574 /* need to set voltage control types before EVV patching */
1575 data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
1576 data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
1577 data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
1578 data->enable_tdc_limit_feature = true;
1579 data->enable_pkg_pwr_tracking_feature = true;
1580 data->force_pcie_gen = PP_PCIEGenInvalid;
1581 data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
1582 data->current_profile_setting.bupdate_sclk = 1;
1583 data->current_profile_setting.sclk_up_hyst = 0;
1584 data->current_profile_setting.sclk_down_hyst = 100;
1585 data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
1586 data->current_profile_setting.bupdate_mclk = 1;
1587 data->current_profile_setting.mclk_up_hyst = 0;
1588 data->current_profile_setting.mclk_down_hyst = 100;
1589 data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
1590 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
1591 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1592 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1594 if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
1595 uint8_t tmp1, tmp2;
1596 uint16_t tmp3 = 0;
1597 atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
1598 &tmp3);
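/* Assumption about the bit manipulation below: bits 6:5 of the SVI2 info are
 * extracted as a 2-bit value and its two bits swapped to form the
 * vddc_phase_shed_control setting.
 */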
1599 tmp3 = (tmp3 >> 5) & 0x3;
1600 data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
1601 } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1602 data->vddc_phase_shed_control = 1;
1603 } else {
1604 data->vddc_phase_shed_control = 0;
1607 if (hwmgr->chip_id == CHIP_HAWAII) {
1608 data->thermal_temp_setting.temperature_low = 94500;
1609 data->thermal_temp_setting.temperature_high = 95000;
1610 data->thermal_temp_setting.temperature_shutdown = 104000;
1611 } else {
1612 data->thermal_temp_setting.temperature_low = 99500;
1613 data->thermal_temp_setting.temperature_high = 100000;
1614 data->thermal_temp_setting.temperature_shutdown = 104000;
1617 data->fast_watermark_threshold = 100;
1618 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1619 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
1620 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1621 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1622 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
1623 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1625 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1626 PHM_PlatformCaps_ControlVDDGFX)) {
1627 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1628 VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
1629 data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1633 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1634 PHM_PlatformCaps_EnableMVDDControl)) {
1635 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1636 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
1637 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1638 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1639 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
1640 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1643 if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
1644 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1645 PHM_PlatformCaps_ControlVDDGFX);
1647 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1648 PHM_PlatformCaps_ControlVDDCI)) {
1649 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1650 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
1651 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1652 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1653 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
1654 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1657 if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
1658 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1659 PHM_PlatformCaps_EnableMVDDControl);
1661 if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
1662 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1663 PHM_PlatformCaps_ControlVDDCI);
1665 if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
1666 && (table_info->cac_dtp_table->usClockStretchAmount != 0))
1667 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1668 PHM_PlatformCaps_ClockStretcher);
1670 data->pcie_gen_performance.max = PP_PCIEGen1;
1671 data->pcie_gen_performance.min = PP_PCIEGen3;
1672 data->pcie_gen_power_saving.max = PP_PCIEGen1;
1673 data->pcie_gen_power_saving.min = PP_PCIEGen3;
1674 data->pcie_lane_performance.max = 0;
1675 data->pcie_lane_performance.min = 16;
1676 data->pcie_lane_power_saving.max = 0;
1677 data->pcie_lane_power_saving.min = 16;
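/*
 * Note: the PCIe gen/lane "performance" and "power saving" ranges are seeded
 * with their worst-case extremes here (max = Gen1 / 0 lanes, min = Gen3 /
 * 16 lanes), presumably so that the power-state parsing in
 * smu7_get_pp_table_entry_v1() can widen them as it walks the levels.
 */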
1680 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1681 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1682 PHM_PlatformCaps_UVDPowerGating);
1683 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
1684 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1685 PHM_PlatformCaps_VCEPowerGating);
1689 * Get Leakage VDDC based on leakage ID.
1691 * @param hwmgr the address of the powerplay hardware manager.
1692 * @return always 0
1694 static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1696 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1697 uint16_t vv_id;
1698 uint16_t vddc = 0;
1699 uint16_t vddgfx = 0;
1700 uint16_t i, j;
1701 uint32_t sclk = 0;
1702 struct phm_ppt_v1_information *table_info =
1703 (struct phm_ppt_v1_information *)hwmgr->pptable;
1704 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1707 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1708 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1710 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1711 if ((hwmgr->pp_table_version == PP_TABLE_V1)
1712 && !phm_get_sclk_for_voltage_evv(hwmgr,
1713 table_info->vddgfx_lookup_table, vv_id, &sclk)) {
1714 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1715 PHM_PlatformCaps_ClockStretcher)) {
1716 sclk_table = table_info->vdd_dep_on_sclk;
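/*
 * If this sclk lands on a dependency-table entry with clock stretching
 * disabled, query EVV slightly above that level (sclk + 5000), presumably
 * to keep the lookup away from the non-CKS point.
 */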
1718 for (j = 1; j < sclk_table->count; j++) {
1719 if (sclk_table->entries[j].clk == sclk &&
1720 sclk_table->entries[j].cks_enable == 0) {
1721 sclk += 5000;
1722 break;
1726 if (0 == atomctrl_get_voltage_evv_on_sclk
1727 (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
1728 vv_id, &vddgfx)) {
1729 /* need to make sure vddgfx is less than 2V, or else it could burn the ASIC. */
1730 PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
1732 /* the voltage should not be zero nor equal to leakage ID */
1733 if (vddgfx != 0 && vddgfx != vv_id) {
1734 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
1735 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
1736 data->vddcgfx_leakage.count++;
1738 } else {
1739 pr_info("Error retrieving EVV voltage value!\n");
1742 } else {
1743 if ((hwmgr->pp_table_version == PP_TABLE_V0)
1744 || !phm_get_sclk_for_voltage_evv(hwmgr,
1745 table_info->vddc_lookup_table, vv_id, &sclk)) {
1746 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1747 PHM_PlatformCaps_ClockStretcher)) {
1748 if (table_info == NULL)
1749 return -EINVAL;
1750 sclk_table = table_info->vdd_dep_on_sclk;
1752 for (j = 1; j < sclk_table->count; j++) {
1753 if (sclk_table->entries[j].clk == sclk &&
1754 sclk_table->entries[j].cks_enable == 0) {
1755 sclk += 5000;
1756 break;
1761 if (phm_get_voltage_evv_on_sclk(hwmgr,
1762 VOLTAGE_TYPE_VDDC,
1763 sclk, vv_id, &vddc) == 0) {
1764 if (vddc >= 2000 || vddc == 0)
1765 return -EINVAL;
1766 } else {
1767 pr_debug("failed to retrieve EVV voltage!\n");
1768 continue;
1771 /* the voltage should not be zero nor equal to leakage ID */
1772 if (vddc != 0 && vddc != vv_id) {
1773 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
1774 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
1775 data->vddc_leakage.count++;
1781 return 0;
1785 * Change virtual leakage voltage to actual value.
1787 * @param hwmgr the address of the powerplay hardware manager.
1788 * @param voltage pointer to the voltage to be patched
1789 * @param leakage_table pointer to the leakage table
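* Example (illustrative values): if *voltage holds the virtual ID 0xff01 and
* the leakage table maps 0xff01 to 1150, *voltage is rewritten to 1150.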
1791 static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
1792 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
1794 uint32_t index;
1796 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
1797 for (index = 0; index < leakage_table->count; index++) {
1798 /* if this voltage matches a leakage voltage ID */
1799 /* patch with actual leakage voltage */
1800 if (leakage_table->leakage_id[index] == *voltage) {
1801 *voltage = leakage_table->actual_voltage[index];
1802 break;
1806 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
1807 pr_err("Voltage value looks like a leakage ID but it's not patched\n");
1811 * Patch voltage lookup table by EVV leakages.
1813 * @param hwmgr the address of the powerplay hardware manager.
1814 * @param lookup_table pointer to the voltage lookup table
1815 * @param leakage_table pointer to the leakage table
1816 * @return always 0
1818 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
1819 phm_ppt_v1_voltage_lookup_table *lookup_table,
1820 struct smu7_leakage_voltage *leakage_table)
1822 uint32_t i;
1824 for (i = 0; i < lookup_table->count; i++)
1825 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1826 &lookup_table->entries[i].us_vdd, leakage_table);
1828 return 0;
1831 static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
1832 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
1833 uint16_t *vddc)
1835 struct phm_ppt_v1_information *table_info =
1836 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1837 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
1838 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
1839 table_info->max_clock_voltage_on_dc.vddc;
1840 return 0;
1843 static int smu7_patch_voltage_dependency_tables_with_lookup_table(
1844 struct pp_hwmgr *hwmgr)
1846 uint8_t entry_id;
1847 uint8_t voltage_id;
1848 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1849 struct phm_ppt_v1_information *table_info =
1850 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1852 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1853 table_info->vdd_dep_on_sclk;
1854 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
1855 table_info->vdd_dep_on_mclk;
1856 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1857 table_info->mm_dep_table;
1859 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1860 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1861 voltage_id = sclk_table->entries[entry_id].vddInd;
1862 sclk_table->entries[entry_id].vddgfx =
1863 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
1865 } else {
1866 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1867 voltage_id = sclk_table->entries[entry_id].vddInd;
1868 sclk_table->entries[entry_id].vddc =
1869 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1873 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1874 voltage_id = mclk_table->entries[entry_id].vddInd;
1875 mclk_table->entries[entry_id].vddc =
1876 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1879 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
1880 voltage_id = mm_table->entries[entry_id].vddcInd;
1881 mm_table->entries[entry_id].vddc =
1882 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1885 return 0;
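/*
 * Add a calculated voltage record to a lookup table.  An existing entry with
 * the same us_vdd is reused: if it was already marked as calculated it is
 * left untouched, otherwise its CAC fields are overwritten in place.  The
 * count is only bumped when a genuinely new slot is appended.
 */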
1889 static int phm_add_voltage(struct pp_hwmgr *hwmgr,
1890 phm_ppt_v1_voltage_lookup_table *look_up_table,
1891 phm_ppt_v1_voltage_lookup_record *record)
1893 uint32_t i;
1895 PP_ASSERT_WITH_CODE((NULL != look_up_table),
1896 "Lookup Table empty.", return -EINVAL);
1897 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
1898 "Lookup Table empty.", return -EINVAL);
1900 i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
1901 PP_ASSERT_WITH_CODE((i >= look_up_table->count),
1902 "Lookup Table is full.", return -EINVAL);
1904 /* This is to avoid entering duplicate calculated records. */
1905 for (i = 0; i < look_up_table->count; i++) {
1906 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
1907 if (look_up_table->entries[i].us_calculated == 1)
1908 return 0;
1909 break;
1913 look_up_table->entries[i].us_calculated = 1;
1914 look_up_table->entries[i].us_vdd = record->us_vdd;
1915 look_up_table->entries[i].us_cac_low = record->us_cac_low;
1916 look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
1917 look_up_table->entries[i].us_cac_high = record->us_cac_high;
1918 /* Only increment the count when we're appending, not replacing duplicate entry. */
1919 if (i == look_up_table->count)
1920 look_up_table->count++;
1922 return 0;
1926 static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
1928 uint8_t entry_id;
1929 struct phm_ppt_v1_voltage_lookup_record v_record;
1930 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1931 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1933 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
1934 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
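/*
 * vdd_offset is carried as an unsigned 16-bit field; when bit 15 is set it is
 * treated as a negative offset (hence the "- 0xFFFF" below).  The resulting
 * absolute voltages are written back into the dependency tables and added to
 * the matching lookup table via phm_add_voltage().
 */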
1936 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1937 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1938 if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
1939 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1940 sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1941 else
1942 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1943 sclk_table->entries[entry_id].vdd_offset;
1945 sclk_table->entries[entry_id].vddc =
1946 v_record.us_cac_low = v_record.us_cac_mid =
1947 v_record.us_cac_high = v_record.us_vdd;
1949 phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
1952 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1953 if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
1954 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1955 mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1956 else
1957 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1958 mclk_table->entries[entry_id].vdd_offset;
1960 mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1961 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1962 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1965 return 0;
1968 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
1970 uint8_t entry_id;
1971 struct phm_ppt_v1_voltage_lookup_record v_record;
1972 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1973 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1974 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1976 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1977 for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
1978 if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
1979 v_record.us_vdd = mm_table->entries[entry_id].vddc +
1980 mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
1981 else
1982 v_record.us_vdd = mm_table->entries[entry_id].vddc +
1983 mm_table->entries[entry_id].vddgfx_offset;
1985 /* Add the calculated VDDGFX to the VDDGFX lookup table */
1986 mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1987 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1988 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1991 return 0;
1994 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
1995 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
1997 uint32_t table_size, i, j;
1998 table_size = lookup_table->count;
2000 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
2001 "Lookup table is empty", return -EINVAL);
2003 /* Sorting voltages */
2004 for (i = 0; i < table_size - 1; i++) {
2005 for (j = i + 1; j > 0; j--) {
2006 if (lookup_table->entries[j].us_vdd <
2007 lookup_table->entries[j - 1].us_vdd) {
2008 swap(lookup_table->entries[j - 1],
2009 lookup_table->entries[j]);
2014 return 0;
2017 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
2019 int result = 0;
2020 int tmp_result;
2021 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2022 struct phm_ppt_v1_information *table_info =
2023 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2025 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2026 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2027 table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
2028 if (tmp_result != 0)
2029 result = tmp_result;
2031 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2032 &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
2033 } else {
2035 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2036 table_info->vddc_lookup_table, &(data->vddc_leakage));
2037 if (tmp_result)
2038 result = tmp_result;
2040 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
2041 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
2042 if (tmp_result)
2043 result = tmp_result;
2046 tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
2047 if (tmp_result)
2048 result = tmp_result;
2050 tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
2051 if (tmp_result)
2052 result = tmp_result;
2054 tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
2055 if (tmp_result)
2056 result = tmp_result;
2058 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
2059 if (tmp_result)
2060 result = tmp_result;
2062 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2063 if (tmp_result)
2064 result = tmp_result;
2066 return result;
2069 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2071 struct phm_ppt_v1_information *table_info =
2072 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2074 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2075 table_info->vdd_dep_on_sclk;
2076 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2077 table_info->vdd_dep_on_mclk;
2079 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2080 "VDD dependency on SCLK table is missing.",
2081 return -EINVAL);
2082 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2083 "VDD dependency on SCLK table has to have is missing.",
2084 return -EINVAL);
2086 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2087 "VDD dependency on MCLK table is missing",
2088 return -EINVAL);
2089 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2090 "VDD dependency on MCLK table has to have is missing.",
2091 return -EINVAL);
2093 table_info->max_clock_voltage_on_ac.sclk =
2094 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2095 table_info->max_clock_voltage_on_ac.mclk =
2096 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2097 table_info->max_clock_voltage_on_ac.vddc =
2098 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2099 table_info->max_clock_voltage_on_ac.vddci =
2100 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2102 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2103 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2104 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2105 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2107 return 0;
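/*
 * Board-specific workaround: on a handful of Polaris10 boards (matched below
 * by PCI revision and subsystem IDs) the top MCLK DPM level is re-pointed at
 * a lookup-table entry of at least 1000 mV, presumably to keep the highest
 * memory clock stable on those designs.
 */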
2110 static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2112 struct phm_ppt_v1_information *table_info =
2113 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2114 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
2115 struct phm_ppt_v1_voltage_lookup_table *lookup_table;
2116 uint32_t i;
2117 uint32_t hw_revision, sub_vendor_id, sub_sys_id;
2118 struct amdgpu_device *adev = hwmgr->adev;
2120 if (table_info != NULL) {
2121 dep_mclk_table = table_info->vdd_dep_on_mclk;
2122 lookup_table = table_info->vddc_lookup_table;
2123 } else
2124 return 0;
2126 hw_revision = adev->pdev->revision;
2127 sub_sys_id = adev->pdev->subsystem_device;
2128 sub_vendor_id = adev->pdev->subsystem_vendor;
2130 if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
2131 ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
2132 (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
2133 (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
2134 if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
2135 return 0;
2137 for (i = 0; i < lookup_table->count; i++) {
2138 if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
2139 dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
2140 return 0;
2144 return 0;
2147 static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
2149 struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2150 uint32_t temp_reg;
2151 struct phm_ppt_v1_information *table_info =
2152 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2155 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
2156 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
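/*
 * Map the VDDC PCC (power-control) GPIO onto the matching CNB_PWRMGT_CNTL
 * field: depending on which bit the pin is assigned to, a different
 * slow-mode / NB power-state control bit is programmed.
 */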
2157 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
2158 case 0:
2159 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
2160 break;
2161 case 1:
2162 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
2163 break;
2164 case 2:
2165 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
2166 break;
2167 case 3:
2168 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
2169 break;
2170 case 4:
2171 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
2172 break;
2173 default:
2174 break;
2176 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
2179 if (table_info == NULL)
2180 return 0;
2182 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
2183 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
2184 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
2185 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2187 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
2188 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2190 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
2192 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
2194 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
2195 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2197 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
2199 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
2200 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
2202 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2203 table_info->cac_dtp_table->usOperatingTempStep = 1;
2204 table_info->cac_dtp_table->usOperatingTempHyst = 1;
2206 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
2207 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2209 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
2210 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
2212 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
2213 table_info->cac_dtp_table->usOperatingTempMinLimit;
2215 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
2216 table_info->cac_dtp_table->usOperatingTempMaxLimit;
2218 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
2219 table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2221 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
2222 table_info->cac_dtp_table->usOperatingTempStep;
2224 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
2225 table_info->cac_dtp_table->usTargetOperatingTemp;
2226 if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
2227 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2228 PHM_PlatformCaps_ODFuzzyFanControlSupport);
2231 return 0;
2235 * Change virtual leakage voltage to actual value.
2237 * @param hwmgr the address of the powerplay hardware manager.
2238 * @param voltage pointer to the voltage to be patched
2239 * @param leakage_table pointer to the leakage table
2241 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2242 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2244 uint32_t index;
2246 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2247 for (index = 0; index < leakage_table->count; index++) {
2248 /* if this voltage matches a leakage voltage ID */
2249 /* patch with actual leakage voltage */
2250 if (leakage_table->leakage_id[index] == *voltage) {
2251 *voltage = leakage_table->actual_voltage[index];
2252 break;
2256 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2257 pr_err("Voltage value looks like a leakage ID but it's not patched\n");
2261 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2262 struct phm_clock_voltage_dependency_table *tab)
2264 uint16_t i;
2265 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2267 if (tab)
2268 for (i = 0; i < tab->count; i++)
2269 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2270 &data->vddc_leakage);
2272 return 0;
2275 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2276 struct phm_clock_voltage_dependency_table *tab)
2278 uint16_t i;
2279 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2281 if (tab)
2282 for (i = 0; i < tab->count; i++)
2283 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2284 &data->vddci_leakage);
2286 return 0;
2289 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2290 struct phm_vce_clock_voltage_dependency_table *tab)
2292 uint16_t i;
2293 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2295 if (tab)
2296 for (i = 0; i < tab->count; i++)
2297 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2298 &data->vddc_leakage);
2300 return 0;
2304 static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2305 struct phm_uvd_clock_voltage_dependency_table *tab)
2307 uint16_t i;
2308 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2310 if (tab)
2311 for (i = 0; i < tab->count; i++)
2312 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2313 &data->vddc_leakage);
2315 return 0;
2318 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2319 struct phm_phase_shedding_limits_table *tab)
2321 uint16_t i;
2322 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2324 if (tab)
2325 for (i = 0; i < tab->count; i++)
2326 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2327 &data->vddc_leakage);
2329 return 0;
2332 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2333 struct phm_samu_clock_voltage_dependency_table *tab)
2335 uint16_t i;
2336 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2338 if (tab)
2339 for (i = 0; i < tab->count; i++)
2340 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2341 &data->vddc_leakage);
2343 return 0;
2346 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2347 struct phm_acp_clock_voltage_dependency_table *tab)
2349 uint16_t i;
2350 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2352 if (tab)
2353 for (i = 0; i < tab->count; i++)
2354 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2355 &data->vddc_leakage);
2357 return 0;
2360 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2361 struct phm_clock_and_voltage_limits *tab)
2363 uint32_t vddc, vddci;
2364 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2366 if (tab) {
2367 vddc = tab->vddc;
2368 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2369 &data->vddc_leakage);
2370 tab->vddc = vddc;
2371 vddci = tab->vddci;
2372 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2373 &data->vddci_leakage);
2374 tab->vddci = vddci;
2377 return 0;
2380 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2382 uint32_t i;
2383 uint32_t vddc;
2384 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2386 if (tab) {
2387 for (i = 0; i < tab->count; i++) {
2388 vddc = (uint32_t)(tab->entries[i].Vddc);
2389 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2390 tab->entries[i].Vddc = (uint16_t)vddc;
2394 return 0;
2397 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2399 int tmp;
2401 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2402 if (tmp)
2403 return -EINVAL;
2405 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2406 if (tmp)
2407 return -EINVAL;
2409 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2410 if (tmp)
2411 return -EINVAL;
2413 tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2414 if (tmp)
2415 return -EINVAL;
2417 tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2418 if (tmp)
2419 return -EINVAL;
2421 tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2422 if (tmp)
2423 return -EINVAL;
2425 tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2426 if (tmp)
2427 return -EINVAL;
2429 tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2430 if (tmp)
2431 return -EINVAL;
2433 tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2434 if (tmp)
2435 return -EINVAL;
2437 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2438 if (tmp)
2439 return -EINVAL;
2441 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2442 if (tmp)
2443 return -EINVAL;
2445 tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2446 if (tmp)
2447 return -EINVAL;
2449 return 0;
2453 static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2455 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2457 struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2458 struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2459 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2461 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2462 "VDDC dependency on SCLK table is missing. This table is mandatory",
2463 return -EINVAL);
2464 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2465 "VDDC dependency on SCLK table has to have is missing. This table is mandatory",
2466 return -EINVAL);
2468 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2469 "VDDC dependency on MCLK table is missing. This table is mandatory",
2470 return -EINVAL);
2471 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2472 "VDD dependency on MCLK table has to have is missing. This table is mandatory",
2473 return -EINVAL);
2475 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2476 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2478 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2479 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2480 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2481 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2482 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2483 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2485 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2486 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2487 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2490 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2491 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2493 return 0;
2496 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2498 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2499 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2500 kfree(hwmgr->backend);
2501 hwmgr->backend = NULL;
2503 return 0;
2506 static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2508 uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2509 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2510 int i;
2512 if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2513 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2514 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2515 if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2516 virtual_voltage_id,
2517 efuse_voltage_id) == 0) {
2518 if (vddc != 0 && vddc != virtual_voltage_id) {
2519 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2520 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2521 data->vddc_leakage.count++;
2523 if (vddci != 0 && vddci != virtual_voltage_id) {
2524 data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2525 data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2526 data->vddci_leakage.count++;
2531 return 0;
2534 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2536 struct smu7_hwmgr *data;
2537 int result = 0;
2539 data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2540 if (data == NULL)
2541 return -ENOMEM;
2543 hwmgr->backend = data;
2544 smu7_patch_voltage_workaround(hwmgr);
2545 smu7_init_dpm_defaults(hwmgr);
2547 /* Get leakage voltage based on leakage ID. */
2548 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2549 PHM_PlatformCaps_EVV)) {
2550 result = smu7_get_evv_voltages(hwmgr);
2551 if (result) {
2552 pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
2553 return -EINVAL;
2555 } else {
2556 smu7_get_elb_voltages(hwmgr);
2559 if (hwmgr->pp_table_version == PP_TABLE_V1) {
2560 smu7_complete_dependency_tables(hwmgr);
2561 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2562 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2563 smu7_patch_dependency_tables_with_leakage(hwmgr);
2564 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2567 /* Initialize Dynamic State Adjustment Rule Settings */
2568 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2570 if (0 == result) {
2571 struct amdgpu_device *adev = hwmgr->adev;
2573 data->is_tlu_enabled = false;
2575 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2576 SMU7_MAX_HARDWARE_POWERLEVELS;
2577 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2578 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2580 data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2581 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2582 data->pcie_spc_cap = 20;
2583 data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2585 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2586 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2587 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2588 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2589 smu7_thermal_parameter_init(hwmgr);
2590 } else {
2591 /* Ignore return value in here, we are cleaning up a mess. */
2592 smu7_hwmgr_backend_fini(hwmgr);
2595 return 0;
2598 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2600 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2601 uint32_t level, tmp;
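/*
 * For each DPM block, force the highest enabled level.  Each loop below
 * computes the index of the highest set bit of the enable mask (equivalent
 * to fls(mask) - 1); e.g. a mask of 0x16 yields level 4.
 */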
2603 if (!data->pcie_dpm_key_disabled) {
2604 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2605 level = 0;
2606 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2607 while (tmp >>= 1)
2608 level++;
2610 if (level)
2611 smum_send_msg_to_smc_with_parameter(hwmgr,
2612 PPSMC_MSG_PCIeDPM_ForceLevel, level);
2616 if (!data->sclk_dpm_key_disabled) {
2617 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2618 level = 0;
2619 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2620 while (tmp >>= 1)
2621 level++;
2623 if (level)
2624 smum_send_msg_to_smc_with_parameter(hwmgr,
2625 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2626 (1 << level));
2630 if (!data->mclk_dpm_key_disabled) {
2631 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2632 level = 0;
2633 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2634 while (tmp >>= 1)
2635 level++;
2637 if (level)
2638 smum_send_msg_to_smc_with_parameter(hwmgr,
2639 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2640 (1 << level));
2644 return 0;
2647 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2649 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2651 if (hwmgr->pp_table_version == PP_TABLE_V1)
2652 phm_apply_dal_min_voltage_request(hwmgr);
2653 /* TODO: handle v0 (Iceland and CI) as well */
2655 if (!data->sclk_dpm_key_disabled) {
2656 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2657 smum_send_msg_to_smc_with_parameter(hwmgr,
2658 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2659 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2662 if (!data->mclk_dpm_key_disabled) {
2663 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2664 smum_send_msg_to_smc_with_parameter(hwmgr,
2665 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2666 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2669 return 0;
2672 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2674 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2676 if (!smum_is_dpm_running(hwmgr))
2677 return -EINVAL;
2679 if (!data->pcie_dpm_key_disabled) {
2680 smum_send_msg_to_smc(hwmgr,
2681 PPSMC_MSG_PCIeDPM_UnForceLevel);
2684 return smu7_upload_dpm_level_enable_mask(hwmgr);
2687 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2689 struct smu7_hwmgr *data =
2690 (struct smu7_hwmgr *)(hwmgr->backend);
2691 uint32_t level;
2693 if (!data->sclk_dpm_key_disabled)
2694 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2695 level = phm_get_lowest_enabled_level(hwmgr,
2696 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2697 smum_send_msg_to_smc_with_parameter(hwmgr,
2698 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2699 (1 << level));
2703 if (!data->mclk_dpm_key_disabled) {
2704 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2705 level = phm_get_lowest_enabled_level(hwmgr,
2706 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2707 smum_send_msg_to_smc_with_parameter(hwmgr,
2708 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2709 (1 << level));
2713 if (!data->pcie_dpm_key_disabled) {
2714 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2715 level = phm_get_lowest_enabled_level(hwmgr,
2716 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2717 smum_send_msg_to_smc_with_parameter(hwmgr,
2718 PPSMC_MSG_PCIeDPM_ForceLevel,
2719 (level));
2723 return 0;
2726 static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
2727 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
2729 uint32_t percentage;
2730 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2731 struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
2732 int32_t tmp_mclk;
2733 int32_t tmp_sclk;
2734 int32_t count;
2736 if (golden_dpm_table->mclk_table.count < 1)
2737 return -EINVAL;
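/*
 * Profiling heuristic: pick the second-highest MCLK level (or the only one,
 * if just one exists) and derive a target SCLK from it using the golden
 * top-level SCLK:MCLK ratio (a fixed 70% when only one MCLK level exists),
 * then snap that SCLK down to the closest SCLK/voltage dependency entry.
 */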
2739 percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
2740 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
2742 if (golden_dpm_table->mclk_table.count == 1) {
2743 percentage = 70;
2744 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
2745 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
2746 } else {
2747 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
2748 *mclk_mask = golden_dpm_table->mclk_table.count - 2;
2751 tmp_sclk = tmp_mclk * percentage / 100;
2753 if (hwmgr->pp_table_version == PP_TABLE_V0) {
2754 for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
2755 count >= 0; count--) {
2756 if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
2757 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
2758 *sclk_mask = count;
2759 break;
2762 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2763 *sclk_mask = 0;
2764 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
2767 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2768 *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
2769 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
2770 struct phm_ppt_v1_information *table_info =
2771 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2773 for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
2774 if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
2775 tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
2776 *sclk_mask = count;
2777 break;
2780 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2781 *sclk_mask = 0;
2782 tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2785 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2786 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
2789 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
2790 *mclk_mask = 0;
2791 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2792 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
2794 *pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
2795 hwmgr->pstate_sclk = tmp_sclk;
2796 hwmgr->pstate_mclk = tmp_mclk;
2798 return 0;
2801 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2802 enum amd_dpm_forced_level level)
2804 int ret = 0;
2805 uint32_t sclk_mask = 0;
2806 uint32_t mclk_mask = 0;
2807 uint32_t pcie_mask = 0;
2809 if (hwmgr->pstate_sclk == 0)
2810 smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2812 switch (level) {
2813 case AMD_DPM_FORCED_LEVEL_HIGH:
2814 ret = smu7_force_dpm_highest(hwmgr);
2815 break;
2816 case AMD_DPM_FORCED_LEVEL_LOW:
2817 ret = smu7_force_dpm_lowest(hwmgr);
2818 break;
2819 case AMD_DPM_FORCED_LEVEL_AUTO:
2820 ret = smu7_unforce_dpm_levels(hwmgr);
2821 break;
2822 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2823 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2824 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2825 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2826 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2827 if (ret)
2828 return ret;
2829 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
2830 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
2831 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
2832 break;
2833 case AMD_DPM_FORCED_LEVEL_MANUAL:
2834 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2835 default:
2836 break;
2839 if (!ret) {
2840 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2841 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
2842 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2843 smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
2845 return ret;
2848 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
2850 return sizeof(struct smu7_power_state);
2853 static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
2854 uint32_t vblank_time_us)
2856 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2857 uint32_t switch_limit_us;
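/*
 * An MCLK switch has to complete within the display vblank; the required
 * vblank budget depends on the ASIC and on the memory type (GDDR5 generally
 * needs more time here).
 */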
2859 switch (hwmgr->chip_id) {
2860 case CHIP_POLARIS10:
2861 case CHIP_POLARIS11:
2862 case CHIP_POLARIS12:
2863 if (hwmgr->is_kicker)
2864 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2865 else
2866 switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
2867 break;
2868 case CHIP_VEGAM:
2869 switch_limit_us = 30;
2870 break;
2871 default:
2872 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2873 break;
2876 if (vblank_time_us < switch_limit_us)
2877 return true;
2878 else
2879 return false;
2882 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2883 struct pp_power_state *request_ps,
2884 const struct pp_power_state *current_ps)
2886 struct amdgpu_device *adev = hwmgr->adev;
2887 struct smu7_power_state *smu7_ps =
2888 cast_phw_smu7_power_state(&request_ps->hardware);
2889 uint32_t sclk;
2890 uint32_t mclk;
2891 struct PP_Clocks minimum_clocks = {0};
2892 bool disable_mclk_switching;
2893 bool disable_mclk_switching_for_frame_lock;
2894 const struct phm_clock_and_voltage_limits *max_limits;
2895 uint32_t i;
2896 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2897 struct phm_ppt_v1_information *table_info =
2898 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2899 int32_t count;
2900 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2902 data->battery_state = (PP_StateUILabel_Battery ==
2903 request_ps->classification.ui_label);
2905 PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
2906 "VI should always have 2 performance levels",
2909 max_limits = adev->pm.ac_power ?
2910 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2911 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
2913 /* Cap clock DPM tables at DC MAX if it is in DC. */
2914 if (!adev->pm.ac_power) {
2915 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2916 if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
2917 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
2918 if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
2919 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
2923 minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
2924 minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
2926 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2927 PHM_PlatformCaps_StablePState)) {
2928 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2929 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
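/*
 * Stable P-state: target 75% of the AC SCLK limit, snapped down to the
 * closest SCLK dependency-table entry below; MCLK is pinned at its AC
 * maximum.
 */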
2931 for (count = table_info->vdd_dep_on_sclk->count - 1;
2932 count >= 0; count--) {
2933 if (stable_pstate_sclk >=
2934 table_info->vdd_dep_on_sclk->entries[count].clk) {
2935 stable_pstate_sclk =
2936 table_info->vdd_dep_on_sclk->entries[count].clk;
2937 break;
2941 if (count < 0)
2942 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2944 stable_pstate_mclk = max_limits->mclk;
2946 minimum_clocks.engineClock = stable_pstate_sclk;
2947 minimum_clocks.memoryClock = stable_pstate_mclk;
2950 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
2951 hwmgr->platform_descriptor.platformCaps,
2952 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2955 if (hwmgr->display_config->num_display == 0)
2956 disable_mclk_switching = false;
2957 else
2958 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
2959 !hwmgr->display_config->multi_monitor_in_sync) ||
2960 disable_mclk_switching_for_frame_lock ||
2961 smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
2963 sclk = smu7_ps->performance_levels[0].engine_clock;
2964 mclk = smu7_ps->performance_levels[0].memory_clock;
2966 if (disable_mclk_switching)
2967 mclk = smu7_ps->performance_levels
2968 [smu7_ps->performance_level_count - 1].memory_clock;
2970 if (sclk < minimum_clocks.engineClock)
2971 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
2972 max_limits->sclk : minimum_clocks.engineClock;
2974 if (mclk < minimum_clocks.memoryClock)
2975 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
2976 max_limits->mclk : minimum_clocks.memoryClock;
2978 smu7_ps->performance_levels[0].engine_clock = sclk;
2979 smu7_ps->performance_levels[0].memory_clock = mclk;
2981 smu7_ps->performance_levels[1].engine_clock =
2982 (smu7_ps->performance_levels[1].engine_clock >=
2983 smu7_ps->performance_levels[0].engine_clock) ?
2984 smu7_ps->performance_levels[1].engine_clock :
2985 smu7_ps->performance_levels[0].engine_clock;
2987 if (disable_mclk_switching) {
2988 if (mclk < smu7_ps->performance_levels[1].memory_clock)
2989 mclk = smu7_ps->performance_levels[1].memory_clock;
2991 smu7_ps->performance_levels[0].memory_clock = mclk;
2992 smu7_ps->performance_levels[1].memory_clock = mclk;
2993 } else {
2994 if (smu7_ps->performance_levels[1].memory_clock <
2995 smu7_ps->performance_levels[0].memory_clock)
2996 smu7_ps->performance_levels[1].memory_clock =
2997 smu7_ps->performance_levels[0].memory_clock;
3000 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3001 PHM_PlatformCaps_StablePState)) {
3002 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3003 smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3004 smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3005 smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3006 smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
3009 return 0;
3013 static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3015 struct pp_power_state *ps;
3016 struct smu7_power_state *smu7_ps;
3018 if (hwmgr == NULL)
3019 return -EINVAL;
3021 ps = hwmgr->request_ps;
3023 if (ps == NULL)
3024 return -EINVAL;
3026 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3028 if (low)
3029 return smu7_ps->performance_levels[0].memory_clock;
3030 else
3031 return smu7_ps->performance_levels
3032 [smu7_ps->performance_level_count-1].memory_clock;
3035 static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3037 struct pp_power_state *ps;
3038 struct smu7_power_state *smu7_ps;
3040 if (hwmgr == NULL)
3041 return -EINVAL;
3043 ps = hwmgr->request_ps;
3045 if (ps == NULL)
3046 return -EINVAL;
3048 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3050 if (low)
3051 return smu7_ps->performance_levels[0].engine_clock;
3052 else
3053 return smu7_ps->performance_levels
3054 [smu7_ps->performance_level_count-1].engine_clock;
3057 static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
3058 struct pp_hw_power_state *hw_ps)
3060 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3061 struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
3062 ATOM_FIRMWARE_INFO_V2_2 *fw_info;
3063 uint16_t size;
3064 uint8_t frev, crev;
3065 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
3067 /* First retrieve the Boot clocks and VDDC from the firmware info table.
3068 * We assume here that fw_info is unchanged if this call fails.
3070 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
3071 &size, &frev, &crev);
3072 if (!fw_info)
3073 /* During a test, there is no firmware info table. */
3074 return 0;
3076 /* Patch the state. */
3077 data->vbios_boot_state.sclk_bootup_value =
3078 le32_to_cpu(fw_info->ulDefaultEngineClock);
3079 data->vbios_boot_state.mclk_bootup_value =
3080 le32_to_cpu(fw_info->ulDefaultMemoryClock);
3081 data->vbios_boot_state.mvdd_bootup_value =
3082 le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
3083 data->vbios_boot_state.vddc_bootup_value =
3084 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
3085 data->vbios_boot_state.vddci_bootup_value =
3086 le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
3087 data->vbios_boot_state.pcie_gen_bootup_value =
3088 smu7_get_current_pcie_speed(hwmgr);
3090 data->vbios_boot_state.pcie_lane_bootup_value =
3091 (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
3093 /* set boot power state */
3094 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
3095 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
3096 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
3097 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
3099 return 0;
3102 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3104 int result;
3105 unsigned long ret = 0;
3107 if (hwmgr->pp_table_version == PP_TABLE_V0) {
3108 result = pp_tables_get_num_of_entries(hwmgr, &ret);
3109 return result ? 0 : ret;
3110 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3111 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3112 return result;
3114 return 0;
3117 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
3118 void *state, struct pp_power_state *power_state,
3119 void *pp_table, uint32_t classification_flag)
3121 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3122 struct smu7_power_state *smu7_power_state =
3123 (struct smu7_power_state *)(&(power_state->hardware));
3124 struct smu7_performance_level *performance_level;
3125 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3126 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3127 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3128 PPTable_Generic_SubTable_Header *sclk_dep_table =
3129 (PPTable_Generic_SubTable_Header *)
3130 (((unsigned long)powerplay_table) +
3131 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3133 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3134 (ATOM_Tonga_MCLK_Dependency_Table *)
3135 (((unsigned long)powerplay_table) +
3136 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3138 /* The following fields are not initialized here: id, orderedList, allStatesList */
3139 power_state->classification.ui_label =
3140 (le16_to_cpu(state_entry->usClassification) &
3141 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3142 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3143 power_state->classification.flags = classification_flag;
3144 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3146 power_state->classification.temporary_state = false;
3147 power_state->classification.to_be_deleted = false;
3149 power_state->validation.disallowOnDC =
3150 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3151 ATOM_Tonga_DISALLOW_ON_DC));
3153 power_state->pcie.lanes = 0;
3155 power_state->display.disableFrameModulation = false;
3156 power_state->display.limitRefreshrate = false;
3157 power_state->display.enableVariBright =
3158 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3159 ATOM_Tonga_ENABLE_VARIBRIGHT));
3161 power_state->validation.supportedPowerLevels = 0;
3162 power_state->uvd_clocks.VCLK = 0;
3163 power_state->uvd_clocks.DCLK = 0;
3164 power_state->temperatures.min = 0;
3165 power_state->temperatures.max = 0;
3167 performance_level = &(smu7_power_state->performance_levels
3168 [smu7_power_state->performance_level_count++]);
3170 PP_ASSERT_WITH_CODE(
3171 (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3172 "Performance levels exceeds SMC limit!",
3173 return -EINVAL);
3175 PP_ASSERT_WITH_CODE(
3176 (smu7_power_state->performance_level_count <=
3177 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3178 "Performance levels exceeds Driver limit!",
3179 return -EINVAL);
3181 /* Performance levels are arranged from low to high. */
3182 performance_level->memory_clock = mclk_dep_table->entries
3183 [state_entry->ucMemoryClockIndexLow].ulMclk;
3184 if (sclk_dep_table->ucRevId == 0)
3185 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3186 [state_entry->ucEngineClockIndexLow].ulSclk;
3187 else if (sclk_dep_table->ucRevId == 1)
3188 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3189 [state_entry->ucEngineClockIndexLow].ulSclk;
3190 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3191 state_entry->ucPCIEGenLow);
3192 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3193 state_entry->ucPCIELaneLow);
3195 performance_level = &(smu7_power_state->performance_levels
3196 [smu7_power_state->performance_level_count++]);
3197 performance_level->memory_clock = mclk_dep_table->entries
3198 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3200 if (sclk_dep_table->ucRevId == 0)
3201 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3202 [state_entry->ucEngineClockIndexHigh].ulSclk;
3203 else if (sclk_dep_table->ucRevId == 1)
3204 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3205 [state_entry->ucEngineClockIndexHigh].ulSclk;
3207 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3208 state_entry->ucPCIEGenHigh);
3209 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3210 state_entry->ucPCIELaneHigh);
3212 return 0;
3215 static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
3216 unsigned long entry_index, struct pp_power_state *state)
3218 int result;
3219 struct smu7_power_state *ps;
3220 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3221 struct phm_ppt_v1_information *table_info =
3222 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3223 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
3224 table_info->vdd_dep_on_mclk;
3226 state->hardware.magic = PHM_VIslands_Magic;
3228 ps = (struct smu7_power_state *)(&state->hardware);
3230 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
3231 smu7_get_pp_table_entry_callback_func_v1);
3233 /* This is the earliest point at which we have both the dependency tables and the
3234 * VBIOS boot state, since get_powerplay_table_entry_v1_0() retrieves the boot state.
3235 * If there is only one VDDCI/MCLK level, check whether it matches the VBIOS boot state.
3237 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3238 if (dep_mclk_table->entries[0].clk !=
3239 data->vbios_boot_state.mclk_bootup_value)
3240 pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3241 "does not match VBIOS boot MCLK level");
3242 if (dep_mclk_table->entries[0].vddci !=
3243 data->vbios_boot_state.vddci_bootup_value)
3244 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3245 "does not match VBIOS boot VDDCI level");
3248 /* set DC compatible flag if this state supports DC */
3249 if (!state->validation.disallowOnDC)
3250 ps->dc_compatible = true;
3252 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3253 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3255 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3256 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3258 if (!result) {
3259 uint32_t i;
3261 switch (state->classification.ui_label) {
3262 case PP_StateUILabel_Performance:
3263 data->use_pcie_performance_levels = true;
3264 for (i = 0; i < ps->performance_level_count; i++) {
3265 if (data->pcie_gen_performance.max <
3266 ps->performance_levels[i].pcie_gen)
3267 data->pcie_gen_performance.max =
3268 ps->performance_levels[i].pcie_gen;
3270 if (data->pcie_gen_performance.min >
3271 ps->performance_levels[i].pcie_gen)
3272 data->pcie_gen_performance.min =
3273 ps->performance_levels[i].pcie_gen;
3275 if (data->pcie_lane_performance.max <
3276 ps->performance_levels[i].pcie_lane)
3277 data->pcie_lane_performance.max =
3278 ps->performance_levels[i].pcie_lane;
3279 if (data->pcie_lane_performance.min >
3280 ps->performance_levels[i].pcie_lane)
3281 data->pcie_lane_performance.min =
3282 ps->performance_levels[i].pcie_lane;
3284 break;
3285 case PP_StateUILabel_Battery:
3286 data->use_pcie_power_saving_levels = true;
3288 for (i = 0; i < ps->performance_level_count; i++) {
3289 if (data->pcie_gen_power_saving.max <
3290 ps->performance_levels[i].pcie_gen)
3291 data->pcie_gen_power_saving.max =
3292 ps->performance_levels[i].pcie_gen;
3294 if (data->pcie_gen_power_saving.min >
3295 ps->performance_levels[i].pcie_gen)
3296 data->pcie_gen_power_saving.min =
3297 ps->performance_levels[i].pcie_gen;
3299 if (data->pcie_lane_power_saving.max <
3300 ps->performance_levels[i].pcie_lane)
3301 data->pcie_lane_power_saving.max =
3302 ps->performance_levels[i].pcie_lane;
3304 if (data->pcie_lane_power_saving.min >
3305 ps->performance_levels[i].pcie_lane)
3306 data->pcie_lane_power_saving.min =
3307 ps->performance_levels[i].pcie_lane;
3309 break;
3310 default:
3311 break;
3314 return 0;
3317 static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3318 struct pp_hw_power_state *power_state,
3319 unsigned int index, const void *clock_info)
3321 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3322 struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
3323 const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
3324 struct smu7_performance_level *performance_level;
3325 uint32_t engine_clock, memory_clock;
3326 uint16_t pcie_gen_from_bios;
3328 engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
3329 memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
3331 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3332 data->highest_mclk = memory_clock;
3334 PP_ASSERT_WITH_CODE(
3335 (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3336 			"Performance levels exceed SMC limit!",
3337 return -EINVAL);
3339 PP_ASSERT_WITH_CODE(
3340 (ps->performance_level_count <
3341 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3342 			"Performance levels exceed driver limit, skip!",
3343 return 0);
3345 performance_level = &(ps->performance_levels
3346 [ps->performance_level_count++]);
3348 /* Performance levels are arranged from low to high. */
3349 performance_level->memory_clock = memory_clock;
3350 performance_level->engine_clock = engine_clock;
3352 pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3354 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3355 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3357 return 0;
3360 static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3361 unsigned long entry_index, struct pp_power_state *state)
3363 int result;
3364 struct smu7_power_state *ps;
3365 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3366 struct phm_clock_voltage_dependency_table *dep_mclk_table =
3367 hwmgr->dyn_state.vddci_dependency_on_mclk;
3369 memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3371 state->hardware.magic = PHM_VIslands_Magic;
3373 ps = (struct smu7_power_state *)(&state->hardware);
3375 result = pp_tables_get_entry(hwmgr, entry_index, state,
3376 smu7_get_pp_table_entry_callback_func_v0);
3379 	 * This is the earliest point at which we have both the dependency
3380 	 * table and the VBIOS boot state, since
3381 	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state.
3382 	 * If there is only one VDDCI/MCLK level, check whether it matches
3383 	 * the VBIOS boot state.
3385 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3386 if (dep_mclk_table->entries[0].clk !=
3387 data->vbios_boot_state.mclk_bootup_value)
3388 pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3389 "does not match VBIOS boot MCLK level");
3390 if (dep_mclk_table->entries[0].v !=
3391 data->vbios_boot_state.vddci_bootup_value)
3392 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3393 "does not match VBIOS boot VDDCI level");
3396 /* set DC compatible flag if this state supports DC */
3397 if (!state->validation.disallowOnDC)
3398 ps->dc_compatible = true;
3400 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3401 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3403 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3404 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3406 if (!result) {
3407 uint32_t i;
3409 switch (state->classification.ui_label) {
3410 case PP_StateUILabel_Performance:
3411 data->use_pcie_performance_levels = true;
3413 for (i = 0; i < ps->performance_level_count; i++) {
3414 if (data->pcie_gen_performance.max <
3415 ps->performance_levels[i].pcie_gen)
3416 data->pcie_gen_performance.max =
3417 ps->performance_levels[i].pcie_gen;
3419 if (data->pcie_gen_performance.min >
3420 ps->performance_levels[i].pcie_gen)
3421 data->pcie_gen_performance.min =
3422 ps->performance_levels[i].pcie_gen;
3424 if (data->pcie_lane_performance.max <
3425 ps->performance_levels[i].pcie_lane)
3426 data->pcie_lane_performance.max =
3427 ps->performance_levels[i].pcie_lane;
3429 if (data->pcie_lane_performance.min >
3430 ps->performance_levels[i].pcie_lane)
3431 data->pcie_lane_performance.min =
3432 ps->performance_levels[i].pcie_lane;
3434 break;
3435 case PP_StateUILabel_Battery:
3436 data->use_pcie_power_saving_levels = true;
3438 for (i = 0; i < ps->performance_level_count; i++) {
3439 if (data->pcie_gen_power_saving.max <
3440 ps->performance_levels[i].pcie_gen)
3441 data->pcie_gen_power_saving.max =
3442 ps->performance_levels[i].pcie_gen;
3444 if (data->pcie_gen_power_saving.min >
3445 ps->performance_levels[i].pcie_gen)
3446 data->pcie_gen_power_saving.min =
3447 ps->performance_levels[i].pcie_gen;
3449 if (data->pcie_lane_power_saving.max <
3450 ps->performance_levels[i].pcie_lane)
3451 data->pcie_lane_power_saving.max =
3452 ps->performance_levels[i].pcie_lane;
3454 if (data->pcie_lane_power_saving.min >
3455 ps->performance_levels[i].pcie_lane)
3456 data->pcie_lane_power_saving.min =
3457 ps->performance_levels[i].pcie_lane;
3459 break;
3460 default:
3461 break;
3464 return 0;
3467 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3468 unsigned long entry_index, struct pp_power_state *state)
3470 if (hwmgr->pp_table_version == PP_TABLE_V0)
3471 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3472 else if (hwmgr->pp_table_version == PP_TABLE_V1)
3473 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3475 return 0;
3478 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
3480 struct amdgpu_device *adev = hwmgr->adev;
3481 int i;
3482 u32 tmp = 0;
3484 if (!query)
3485 return -EINVAL;
3488 * PPSMC_MSG_GetCurrPkgPwr is not supported on:
3489 * - Hawaii
3490 * - Bonaire
3491 * - Fiji
3492 * - Tonga
3494 if ((adev->asic_type != CHIP_HAWAII) &&
3495 (adev->asic_type != CHIP_BONAIRE) &&
3496 (adev->asic_type != CHIP_FIJI) &&
3497 (adev->asic_type != CHIP_TONGA)) {
3498 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
3499 tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3500 *query = tmp;
3502 if (tmp != 0)
3503 return 0;
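	/*
	 * Fallback when GetCurrPkgPwr is unavailable or returned 0: start the
	 * SMU's power-status logging, clear SMU_PM_STATUS_95, then poll it
	 * every 500 ms (up to ten samples, ~5 s) until a non-zero reading
	 * appears, and report that value.
	 */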
3506 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
3507 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3508 ixSMU_PM_STATUS_95, 0);
3510 for (i = 0; i < 10; i++) {
3511 msleep(500);
3512 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
3513 tmp = cgs_read_ind_register(hwmgr->device,
3514 CGS_IND_REG__SMC,
3515 ixSMU_PM_STATUS_95);
3516 if (tmp != 0)
3517 break;
3519 *query = tmp;
3521 return 0;
3524 static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3525 void *value, int *size)
3527 uint32_t sclk, mclk, activity_percent;
3528 uint32_t offset, val_vid;
3529 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3531 /* size must be at least 4 bytes for all sensors */
3532 if (*size < 4)
3533 return -EINVAL;
3535 switch (idx) {
3536 case AMDGPU_PP_SENSOR_GFX_SCLK:
3537 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
3538 sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3539 *((uint32_t *)value) = sclk;
3540 *size = 4;
3541 return 0;
3542 case AMDGPU_PP_SENSOR_GFX_MCLK:
3543 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
3544 mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3545 *((uint32_t *)value) = mclk;
3546 *size = 4;
3547 return 0;
3548 case AMDGPU_PP_SENSOR_GPU_LOAD:
3549 case AMDGPU_PP_SENSOR_MEM_LOAD:
3550 offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
3551 SMU_SoftRegisters,
3552 (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
3553 AverageGraphicsActivity:
3554 AverageMemoryActivity);
3556 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
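		/*
		 * The soft register appears to hold activity as a fixed-point
		 * value with eight fractional bits (an interpretation based on
		 * the arithmetic below): adding 0x80 rounds to the nearest
		 * integer percent before the shift drops the fractional byte,
		 * and the result is clamped to 100.
		 */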
3557 activity_percent += 0x80;
3558 activity_percent >>= 8;
3559 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3560 *size = 4;
3561 return 0;
3562 case AMDGPU_PP_SENSOR_GPU_TEMP:
3563 *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
3564 *size = 4;
3565 return 0;
3566 case AMDGPU_PP_SENSOR_UVD_POWER:
3567 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3568 *size = 4;
3569 return 0;
3570 case AMDGPU_PP_SENSOR_VCE_POWER:
3571 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3572 *size = 4;
3573 return 0;
3574 case AMDGPU_PP_SENSOR_GPU_POWER:
3575 return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
3576 case AMDGPU_PP_SENSOR_VDDGFX:
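		/*
		 * Read the graphics-rail VID from the SVI2 status register:
		 * plane 2 when the low byte of vr_config is 0x2, otherwise
		 * plane 1, then convert the VID code to a voltage with
		 * convert_to_vddc().
		 */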
3577 if ((data->vr_config & 0xff) == 0x2)
3578 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3579 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
3580 else
3581 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3582 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);
3584 *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
3585 return 0;
3586 default:
3587 return -EINVAL;
3591 static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3593 const struct phm_set_power_state_input *states =
3594 (const struct phm_set_power_state_input *)input;
3595 const struct smu7_power_state *smu7_ps =
3596 cast_const_phw_smu7_power_state(states->pnew_state);
3597 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3598 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
3599 uint32_t sclk = smu7_ps->performance_levels
3600 [smu7_ps->performance_level_count - 1].engine_clock;
3601 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
3602 uint32_t mclk = smu7_ps->performance_levels
3603 [smu7_ps->performance_level_count - 1].memory_clock;
3604 struct PP_Clocks min_clocks = {0};
3605 uint32_t i;
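	/*
	 * Detect overdrive/clock changes: if the top-of-state SCLK (and, below,
	 * MCLK) is not one of the existing DPM levels and exceeds the current
	 * top level, flag an OD update and overwrite the top level; otherwise
	 * only a deep-sleep change (SCLK) or a display-count change (MCLK)
	 * triggers a table update.
	 */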
3607 for (i = 0; i < sclk_table->count; i++) {
3608 if (sclk == sclk_table->dpm_levels[i].value)
3609 break;
3612 if (i >= sclk_table->count) {
3613 if (sclk > sclk_table->dpm_levels[i-1].value) {
3614 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3615 sclk_table->dpm_levels[i-1].value = sclk;
3617 } else {
3618 /* TODO: Check SCLK in DAL's minimum clocks
3619 * in case DeepSleep divider update is required.
3621 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
3622 (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
3623 data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
3624 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3627 for (i = 0; i < mclk_table->count; i++) {
3628 if (mclk == mclk_table->dpm_levels[i].value)
3629 break;
3632 if (i >= mclk_table->count) {
3633 if (mclk > mclk_table->dpm_levels[i-1].value) {
3634 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3635 mclk_table->dpm_levels[i-1].value = mclk;
3639 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
3640 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3642 return 0;
3645 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3646 const struct smu7_power_state *smu7_ps)
3648 uint32_t i;
3649 uint32_t sclk, max_sclk = 0;
3650 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3651 struct smu7_dpm_table *dpm_table = &data->dpm_table;
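	/*
	 * Pick the highest engine clock in the state, find the matching SCLK
	 * DPM level, and return the PCIe speed programmed for that level
	 * (clamped to the last entry of the PCIe speed table); 0 if no level
	 * matches.
	 */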
3653 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3654 sclk = smu7_ps->performance_levels[i].engine_clock;
3655 if (max_sclk < sclk)
3656 max_sclk = sclk;
3659 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3660 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3661 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3662 dpm_table->pcie_speed_table.dpm_levels
3663 [dpm_table->pcie_speed_table.count - 1].value :
3664 dpm_table->pcie_speed_table.dpm_levels[i].value);
3667 return 0;
3670 static int smu7_request_link_speed_change_before_state_change(
3671 struct pp_hwmgr *hwmgr, const void *input)
3673 const struct phm_set_power_state_input *states =
3674 (const struct phm_set_power_state_input *)input;
3675 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3676 const struct smu7_power_state *smu7_nps =
3677 cast_const_phw_smu7_power_state(states->pnew_state);
3678 const struct smu7_power_state *polaris10_cps =
3679 cast_const_phw_smu7_power_state(states->pcurrent_state);
3681 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
3682 uint16_t current_link_speed;
3684 if (data->force_pcie_gen == PP_PCIEGenInvalid)
3685 current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
3686 else
3687 current_link_speed = data->force_pcie_gen;
3689 data->force_pcie_gen = PP_PCIEGenInvalid;
3690 data->pspp_notify_required = false;
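	/*
	 * When the new state needs a faster link, ask the platform (ACPI PSPP)
	 * to raise the PCIe speed now, falling back one generation at a time on
	 * failure and, as a last resort, keeping the currently trained speed
	 * via force_pcie_gen. Downgrades are only flagged here and the request
	 * is sent after the state change.
	 */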
3692 if (target_link_speed > current_link_speed) {
3693 switch (target_link_speed) {
3694 #ifdef CONFIG_ACPI
3695 case PP_PCIEGen3:
3696 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
3697 break;
3698 data->force_pcie_gen = PP_PCIEGen2;
3699 if (current_link_speed == PP_PCIEGen2)
3700 break;
3701 /* fall through */
3702 case PP_PCIEGen2:
3703 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
3704 break;
3705 #endif
3706 /* fall through */
3707 default:
3708 data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
3709 break;
3711 } else {
3712 if (target_link_speed < current_link_speed)
3713 data->pspp_notify_required = true;
3716 return 0;
3719 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3721 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3723 if (0 == data->need_update_smu7_dpm_table)
3724 return 0;
3726 if ((0 == data->sclk_dpm_key_disabled) &&
3727 (data->need_update_smu7_dpm_table &
3728 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3729 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3730 "Trying to freeze SCLK DPM when DPM is disabled",
3732 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3733 PPSMC_MSG_SCLKDPM_FreezeLevel),
3734 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
3735 return -EINVAL);
3738 if ((0 == data->mclk_dpm_key_disabled) &&
3739 (data->need_update_smu7_dpm_table &
3740 DPMTABLE_OD_UPDATE_MCLK)) {
3741 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3742 "Trying to freeze MCLK DPM when DPM is disabled",
3744 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3745 PPSMC_MSG_MCLKDPM_FreezeLevel),
3746 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
3747 return -EINVAL);
3750 return 0;
3753 static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
3754 struct pp_hwmgr *hwmgr, const void *input)
3756 int result = 0;
3757 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3758 struct smu7_dpm_table *dpm_table = &data->dpm_table;
3759 uint32_t count;
3760 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
3761 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
3762 struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
3764 if (0 == data->need_update_smu7_dpm_table)
3765 return 0;
3767 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3768 for (count = 0; count < dpm_table->sclk_table.count; count++) {
3769 dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
3770 dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
3774 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3775 for (count = 0; count < dpm_table->mclk_table.count; count++) {
3776 dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
3777 dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
3781 if (data->need_update_smu7_dpm_table &
3782 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
3783 result = smum_populate_all_graphic_levels(hwmgr);
3784 PP_ASSERT_WITH_CODE((0 == result),
3785 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3786 return result);
3789 if (data->need_update_smu7_dpm_table &
3790 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3791 		/* populate the MCLK DPM table for the SMC */
3792 result = smum_populate_all_memory_levels(hwmgr);
3793 PP_ASSERT_WITH_CODE((0 == result),
3794 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3795 return result);
3798 return result;
3801 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3802 struct smu7_single_dpm_table *dpm_table,
3803 uint32_t low_limit, uint32_t high_limit)
3805 uint32_t i;
3807 for (i = 0; i < dpm_table->count; i++) {
3808 		/* skip the trim if OD is enabled */
3809 if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
3810 || dpm_table->dpm_levels[i].value > high_limit))
3811 dpm_table->dpm_levels[i].enabled = false;
3812 else
3813 dpm_table->dpm_levels[i].enabled = true;
3816 return 0;
3819 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
3820 const struct smu7_power_state *smu7_ps)
3822 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3823 uint32_t high_limit_count;
3825 PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
3826 "power state did not have any performance level",
3827 return -EINVAL);
3829 high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
3831 smu7_trim_single_dpm_states(hwmgr,
3832 &(data->dpm_table.sclk_table),
3833 smu7_ps->performance_levels[0].engine_clock,
3834 smu7_ps->performance_levels[high_limit_count].engine_clock);
3836 smu7_trim_single_dpm_states(hwmgr,
3837 &(data->dpm_table.mclk_table),
3838 smu7_ps->performance_levels[0].memory_clock,
3839 smu7_ps->performance_levels[high_limit_count].memory_clock);
3841 return 0;
3844 static int smu7_generate_dpm_level_enable_mask(
3845 struct pp_hwmgr *hwmgr, const void *input)
3847 int result = 0;
3848 const struct phm_set_power_state_input *states =
3849 (const struct phm_set_power_state_input *)input;
3850 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3851 const struct smu7_power_state *smu7_ps =
3852 cast_const_phw_smu7_power_state(states->pnew_state);
3855 result = smu7_trim_dpm_states(hwmgr, smu7_ps);
3856 if (result)
3857 return result;
3859 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
3860 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
3861 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
3862 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
3863 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
3864 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
3866 return 0;
3869 static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3871 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3873 if (0 == data->need_update_smu7_dpm_table)
3874 return 0;
3876 if ((0 == data->sclk_dpm_key_disabled) &&
3877 (data->need_update_smu7_dpm_table &
3878 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3880 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3881 				"Trying to unfreeze SCLK DPM when DPM is disabled",
3883 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3884 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
3885 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
3886 return -EINVAL);
3889 if ((0 == data->mclk_dpm_key_disabled) &&
3890 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
3892 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3893 				"Trying to unfreeze MCLK DPM when DPM is disabled",
3895 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3896 PPSMC_MSG_MCLKDPM_UnfreezeLevel),
3897 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
3898 return -EINVAL);
3901 data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
3903 return 0;
3906 static int smu7_notify_link_speed_change_after_state_change(
3907 struct pp_hwmgr *hwmgr, const void *input)
3909 const struct phm_set_power_state_input *states =
3910 (const struct phm_set_power_state_input *)input;
3911 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3912 const struct smu7_power_state *smu7_ps =
3913 cast_const_phw_smu7_power_state(states->pnew_state);
3914 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
3915 uint8_t request;
3917 if (data->pspp_notify_required) {
3918 if (target_link_speed == PP_PCIEGen3)
3919 request = PCIE_PERF_REQ_GEN3;
3920 else if (target_link_speed == PP_PCIEGen2)
3921 request = PCIE_PERF_REQ_GEN2;
3922 else
3923 request = PCIE_PERF_REQ_GEN1;
3925 if (request == PCIE_PERF_REQ_GEN1 &&
3926 smu7_get_current_pcie_speed(hwmgr) > 0)
3927 return 0;
3929 #ifdef CONFIG_ACPI
3930 if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
3931 if (PP_PCIEGen2 == target_link_speed)
3932 			pr_info("PSPP request to switch to Gen2 from Gen3 failed!");
3933 else
3934 			pr_info("PSPP request to switch to Gen1 from Gen2 failed!");
3936 #endif
3939 return 0;
3942 static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
3944 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
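	/*
	 * When VBI-time support is enabled, program the SMC's VBI timeout with
	 * frame_time_x2 (computed in smu7_program_display_gap); VEGAM uses its
	 * own message ID for this. Afterwards tell the SMC that a display is
	 * present.
	 */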
3946 if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
3947 if (hwmgr->chip_id == CHIP_VEGAM)
3948 smum_send_msg_to_smc_with_parameter(hwmgr,
3949 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
3950 else
3951 smum_send_msg_to_smc_with_parameter(hwmgr,
3952 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
3954 return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
3957 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
3959 int tmp_result, result = 0;
3960 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3962 tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3963 PP_ASSERT_WITH_CODE((0 == tmp_result),
3964 "Failed to find DPM states clocks in DPM table!",
3965 result = tmp_result);
3967 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3968 PHM_PlatformCaps_PCIEPerformanceRequest)) {
3969 tmp_result =
3970 smu7_request_link_speed_change_before_state_change(hwmgr, input);
3971 PP_ASSERT_WITH_CODE((0 == tmp_result),
3972 "Failed to request link speed change before state change!",
3973 result = tmp_result);
3976 tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
3977 PP_ASSERT_WITH_CODE((0 == tmp_result),
3978 "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
3980 tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3981 PP_ASSERT_WITH_CODE((0 == tmp_result),
3982 "Failed to populate and upload SCLK MCLK DPM levels!",
3983 result = tmp_result);
3986 	 * If a custom pp table is loaded, set the DPMTABLE_OD_UPDATE_VDDC flag,
3987 	 * which effectively disables the AVFS feature.
3989 if (hwmgr->hardcode_pp_table != NULL)
3990 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
3992 tmp_result = smu7_update_avfs(hwmgr);
3993 PP_ASSERT_WITH_CODE((0 == tmp_result),
3994 "Failed to update avfs voltages!",
3995 result = tmp_result);
3997 tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
3998 PP_ASSERT_WITH_CODE((0 == tmp_result),
3999 "Failed to generate DPM level enabled mask!",
4000 result = tmp_result);
4002 tmp_result = smum_update_sclk_threshold(hwmgr);
4003 PP_ASSERT_WITH_CODE((0 == tmp_result),
4004 "Failed to update SCLK threshold!",
4005 result = tmp_result);
4007 tmp_result = smu7_notify_smc_display(hwmgr);
4008 PP_ASSERT_WITH_CODE((0 == tmp_result),
4009 "Failed to notify smc display settings!",
4010 result = tmp_result);
4012 tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
4013 PP_ASSERT_WITH_CODE((0 == tmp_result),
4014 "Failed to unfreeze SCLK MCLK DPM!",
4015 result = tmp_result);
4017 tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
4018 PP_ASSERT_WITH_CODE((0 == tmp_result),
4019 "Failed to upload DPM level enabled mask!",
4020 result = tmp_result);
4022 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4023 PHM_PlatformCaps_PCIEPerformanceRequest)) {
4024 tmp_result =
4025 smu7_notify_link_speed_change_after_state_change(hwmgr, input);
4026 PP_ASSERT_WITH_CODE((0 == tmp_result),
4027 "Failed to notify link speed change after state change!",
4028 result = tmp_result);
4030 data->apply_optimized_settings = false;
4031 return result;
4034 static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4036 hwmgr->thermal_controller.
4037 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4039 return smum_send_msg_to_smc_with_parameter(hwmgr,
4040 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
4043 static int
4044 smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
4046 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
4048 return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1;
4051 static int
4052 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4054 if (hwmgr->display_config->num_display > 1 &&
4055 !hwmgr->display_config->multi_monitor_in_sync)
4056 smu7_notify_smc_display_change(hwmgr, false);
4058 return 0;
4062 * Programs the display gap
4064 * @param hwmgr the address of the powerplay hardware manager.
4065 * @return always 0
4067 static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
4069 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4070 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4071 uint32_t display_gap2;
4072 uint32_t pre_vbi_time_in_us;
4073 uint32_t frame_time_in_us;
4074 uint32_t ref_clock, refresh_rate;
4076 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
4077 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
4079 ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
4080 refresh_rate = hwmgr->display_config->vrefresh;
4082 if (0 == refresh_rate)
4083 refresh_rate = 60;
4085 frame_time_in_us = 1000000 / refresh_rate;
4087 pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
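	/*
	 * frame_time_x2 is twice the frame time expressed in 100 us units
	 * (e.g. 60 Hz -> 16666 us -> 333), floored below at 280 (~14 ms, i.e.
	 * roughly a 71 Hz frame). display_gap2 converts the pre-VBI time into
	 * reference-clock ticks, assuming ref_clock is reported in 10 kHz
	 * units as elsewhere in powerplay.
	 */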
4089 data->frame_time_x2 = frame_time_in_us * 2 / 100;
4091 if (data->frame_time_x2 < 280) {
4092 pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
4093 data->frame_time_x2 = 280;
4096 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4098 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
4100 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4101 data->soft_regs_start + smum_get_offsetof(hwmgr,
4102 SMU_SoftRegisters,
4103 PreVBlankGap), 0x64);
4105 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4106 data->soft_regs_start + smum_get_offsetof(hwmgr,
4107 SMU_SoftRegisters,
4108 VBlankTimeout),
4109 (frame_time_in_us - pre_vbi_time_in_us));
4111 return 0;
4114 static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4116 return smu7_program_display_gap(hwmgr);
4120 * Set maximum target operating fan output RPM
4122 * @param hwmgr: the address of the powerplay hardware manager.
4123 * @param usMaxFanRpm: max operating fan RPM value.
4124 * @return The response that came from the SMC.
4126 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4128 hwmgr->thermal_controller.
4129 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4131 return smum_send_msg_to_smc_with_parameter(hwmgr,
4132 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
4135 static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
4136 .process = phm_irq_process,
4139 static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
4141 struct amdgpu_irq_src *source =
4142 kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
4144 if (!source)
4145 return -ENOMEM;
4147 source->funcs = &smu7_irq_funcs;
4149 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4150 AMDGPU_IRQ_CLIENTID_LEGACY,
4151 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
4152 source);
4153 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4154 AMDGPU_IRQ_CLIENTID_LEGACY,
4155 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
4156 source);
4158 /* Register CTF(GPIO_19) interrupt */
4159 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4160 AMDGPU_IRQ_CLIENTID_LEGACY,
4161 VISLANDS30_IV_SRCID_GPIO_19,
4162 source);
4164 return 0;
4167 static bool
4168 smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4170 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4171 bool is_update_required = false;
4173 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4174 is_update_required = true;
4176 if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
4177 is_update_required = true;
4179 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4180 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
4181 (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4182 hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4183 is_update_required = true;
4185 return is_update_required;
4188 static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4189 const struct smu7_performance_level *pl2)
4191 return ((pl1->memory_clock == pl2->memory_clock) &&
4192 (pl1->engine_clock == pl2->engine_clock) &&
4193 (pl1->pcie_gen == pl2->pcie_gen) &&
4194 (pl1->pcie_lane == pl2->pcie_lane));
4197 static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4198 const struct pp_hw_power_state *pstate1,
4199 const struct pp_hw_power_state *pstate2, bool *equal)
4201 const struct smu7_power_state *psa;
4202 const struct smu7_power_state *psb;
4203 int i;
4204 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4206 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4207 return -EINVAL;
4209 psa = cast_const_phw_smu7_power_state(pstate1);
4210 psb = cast_const_phw_smu7_power_state(pstate2);
4211 	/* If the two states don't even have the same number of performance levels, they cannot be the same state. */
4212 if (psa->performance_level_count != psb->performance_level_count) {
4213 *equal = false;
4214 return 0;
4217 for (i = 0; i < psa->performance_level_count; i++) {
4218 if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4219 			/* If we have found even one performance level pair that is different, the states are different. */
4220 *equal = false;
4221 return 0;
4225 	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
4226 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4227 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4228 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4229 /* For OD call, set value based on flag */
4230 *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
4231 DPMTABLE_OD_UPDATE_MCLK |
4232 DPMTABLE_OD_UPDATE_VDDC));
4234 return 0;
4237 static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
4239 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4241 uint32_t tmp;
4243 /* Read MC indirect register offset 0x9F bits [3:0] to see
4244 * if VBIOS has already loaded a full version of MC ucode
4245 * or not.
4248 smu7_get_mc_microcode_version(hwmgr);
4250 data->need_long_memory_training = false;
4252 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4253 ixMC_IO_DEBUG_UP_13);
4254 tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4256 if (tmp & (1 << 23)) {
4257 data->mem_latency_high = MEM_LATENCY_HIGH;
4258 data->mem_latency_low = MEM_LATENCY_LOW;
4259 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4260 (hwmgr->chip_id == CHIP_POLARIS11) ||
4261 (hwmgr->chip_id == CHIP_POLARIS12))
4262 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
4263 } else {
4264 data->mem_latency_high = 330;
4265 data->mem_latency_low = 330;
4266 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4267 (hwmgr->chip_id == CHIP_POLARIS11) ||
4268 (hwmgr->chip_id == CHIP_POLARIS12))
4269 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
4272 return 0;
4275 static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
4277 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4279 data->clock_registers.vCG_SPLL_FUNC_CNTL =
4280 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
4281 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
4282 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
4283 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
4284 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
4285 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
4286 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
4287 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
4288 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
4289 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
4290 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
4291 data->clock_registers.vDLL_CNTL =
4292 cgs_read_register(hwmgr->device, mmDLL_CNTL);
4293 data->clock_registers.vMCLK_PWRMGT_CNTL =
4294 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
4295 data->clock_registers.vMPLL_AD_FUNC_CNTL =
4296 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
4297 data->clock_registers.vMPLL_DQ_FUNC_CNTL =
4298 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
4299 data->clock_registers.vMPLL_FUNC_CNTL =
4300 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
4301 data->clock_registers.vMPLL_FUNC_CNTL_1 =
4302 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
4303 data->clock_registers.vMPLL_FUNC_CNTL_2 =
4304 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
4305 data->clock_registers.vMPLL_SS1 =
4306 cgs_read_register(hwmgr->device, mmMPLL_SS1);
4307 data->clock_registers.vMPLL_SS2 =
4308 cgs_read_register(hwmgr->device, mmMPLL_SS2);
4309 return 0;
4314 * Find out if memory is GDDR5.
4316 * @param hwmgr the address of the powerplay hardware manager.
4317 * @return always 0
4319 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4321 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4322 struct amdgpu_device *adev = hwmgr->adev;
4324 data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
4326 return 0;
4330 * Enables Dynamic Power Management by SMC
4332 * @param hwmgr the address of the powerplay hardware manager.
4333 * @return always 0
4335 static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
4337 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4338 GENERAL_PWRMGT, STATIC_PM_EN, 1);
4340 return 0;
4344 * Initialize PowerGating States for different engines
4346 * @param hwmgr the address of the powerplay hardware manager.
4347 * @return always 0
4349 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4351 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4353 data->uvd_power_gated = false;
4354 data->vce_power_gated = false;
4356 return 0;
4359 static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4361 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4363 data->low_sclk_interrupt_threshold = 0;
4364 return 0;
4367 static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
4369 int tmp_result, result = 0;
4371 smu7_check_mc_firmware(hwmgr);
4373 tmp_result = smu7_read_clock_registers(hwmgr);
4374 PP_ASSERT_WITH_CODE((0 == tmp_result),
4375 "Failed to read clock registers!", result = tmp_result);
4377 tmp_result = smu7_get_memory_type(hwmgr);
4378 PP_ASSERT_WITH_CODE((0 == tmp_result),
4379 "Failed to get memory type!", result = tmp_result);
4381 tmp_result = smu7_enable_acpi_power_management(hwmgr);
4382 PP_ASSERT_WITH_CODE((0 == tmp_result),
4383 "Failed to enable ACPI power management!", result = tmp_result);
4385 tmp_result = smu7_init_power_gate_state(hwmgr);
4386 PP_ASSERT_WITH_CODE((0 == tmp_result),
4387 "Failed to init power gate state!", result = tmp_result);
4389 tmp_result = smu7_get_mc_microcode_version(hwmgr);
4390 PP_ASSERT_WITH_CODE((0 == tmp_result),
4391 "Failed to get MC microcode version!", result = tmp_result);
4393 tmp_result = smu7_init_sclk_threshold(hwmgr);
4394 PP_ASSERT_WITH_CODE((0 == tmp_result),
4395 "Failed to init sclk threshold!", result = tmp_result);
4397 return result;
4400 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4401 enum pp_clock_type type, uint32_t mask)
4403 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4405 if (mask == 0)
4406 return -EINVAL;
4408 switch (type) {
4409 case PP_SCLK:
4410 if (!data->sclk_dpm_key_disabled)
4411 smum_send_msg_to_smc_with_parameter(hwmgr,
4412 PPSMC_MSG_SCLKDPM_SetEnabledMask,
4413 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
4414 break;
4415 case PP_MCLK:
4416 if (!data->mclk_dpm_key_disabled)
4417 smum_send_msg_to_smc_with_parameter(hwmgr,
4418 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4419 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
4420 break;
4421 case PP_PCIE:
4423 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
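		/*
		 * If more than one PCIe level remains in the mask
		 * (fls != ffs), unforce and let DPM range over them;
		 * if exactly one bit is set, force that single level
		 * (fls(tmp) - 1 is its index).
		 */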
4425 if (!data->pcie_dpm_key_disabled) {
4426 if (fls(tmp) != ffs(tmp))
4427 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
4428 else
4429 smum_send_msg_to_smc_with_parameter(hwmgr,
4430 PPSMC_MSG_PCIeDPM_ForceLevel,
4431 fls(tmp) - 1);
4433 break;
4435 default:
4436 break;
4439 return 0;
4442 static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
4443 enum pp_clock_type type, char *buf)
4445 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4446 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4447 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4448 struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
4449 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4450 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4451 struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4452 int i, now, size = 0;
4453 uint32_t clock, pcie_speed;
4455 switch (type) {
4456 case PP_SCLK:
4457 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
4458 clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
4460 for (i = 0; i < sclk_table->count; i++) {
4461 if (clock > sclk_table->dpm_levels[i].value)
4462 continue;
4463 break;
4465 now = i;
4467 for (i = 0; i < sclk_table->count; i++)
4468 size += sprintf(buf + size, "%d: %uMhz %s\n",
4469 i, sclk_table->dpm_levels[i].value / 100,
4470 (i == now) ? "*" : "");
4471 break;
4472 case PP_MCLK:
4473 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
4474 clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
4476 for (i = 0; i < mclk_table->count; i++) {
4477 if (clock > mclk_table->dpm_levels[i].value)
4478 continue;
4479 break;
4481 now = i;
4483 for (i = 0; i < mclk_table->count; i++)
4484 size += sprintf(buf + size, "%d: %uMhz %s\n",
4485 i, mclk_table->dpm_levels[i].value / 100,
4486 (i == now) ? "*" : "");
4487 break;
4488 case PP_PCIE:
4489 pcie_speed = smu7_get_current_pcie_speed(hwmgr);
4490 for (i = 0; i < pcie_table->count; i++) {
4491 if (pcie_speed != pcie_table->dpm_levels[i].value)
4492 continue;
4493 break;
4495 now = i;
4497 for (i = 0; i < pcie_table->count; i++)
4498 size += sprintf(buf + size, "%d: %s %s\n", i,
4499 (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
4500 (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
4501 (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
4502 (i == now) ? "*" : "");
4503 break;
4504 case OD_SCLK:
4505 if (hwmgr->od_enabled) {
4506 size = sprintf(buf, "%s:\n", "OD_SCLK");
4507 for (i = 0; i < odn_sclk_table->num_of_pl; i++)
4508 size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4509 i, odn_sclk_table->entries[i].clock/100,
4510 odn_sclk_table->entries[i].vddc);
4512 break;
4513 case OD_MCLK:
4514 if (hwmgr->od_enabled) {
4515 size = sprintf(buf, "%s:\n", "OD_MCLK");
4516 for (i = 0; i < odn_mclk_table->num_of_pl; i++)
4517 size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4518 i, odn_mclk_table->entries[i].clock/100,
4519 odn_mclk_table->entries[i].vddc);
4521 break;
4522 case OD_RANGE:
4523 if (hwmgr->od_enabled) {
4524 size = sprintf(buf, "%s:\n", "OD_RANGE");
4525 size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4526 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
4527 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4528 size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4529 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
4530 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4531 size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4532 data->odn_dpm_table.min_vddc,
4533 data->odn_dpm_table.max_vddc);
4535 break;
4536 default:
4537 break;
4539 return size;
4542 static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4544 switch (mode) {
4545 case AMD_FAN_CTRL_NONE:
4546 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4547 break;
4548 case AMD_FAN_CTRL_MANUAL:
4549 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4550 PHM_PlatformCaps_MicrocodeFanControl))
4551 smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4552 break;
4553 case AMD_FAN_CTRL_AUTO:
4554 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
4555 smu7_fan_ctrl_start_smc_fan_control(hwmgr);
4556 break;
4557 default:
4558 break;
4562 static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4564 return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
4567 static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4569 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4570 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4571 struct smu7_single_dpm_table *golden_sclk_table =
4572 &(data->golden_dpm_table.sclk_table);
4573 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4574 int golden_value = golden_sclk_table->dpm_levels
4575 [golden_sclk_table->count - 1].value;
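	/*
	 * Report overdrive as the percentage by which the current top SCLK
	 * level exceeds the golden (default) top level, rounded up; e.g. a
	 * hypothetical 1350 MHz top against a 1286 MHz golden top reads as
	 * DIV_ROUND_UP(64 * 100, 1286) = 5 (percent).
	 */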
4577 value -= golden_value;
4578 value = DIV_ROUND_UP(value * 100, golden_value);
4580 return value;
4583 static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4585 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4586 struct smu7_single_dpm_table *golden_sclk_table =
4587 &(data->golden_dpm_table.sclk_table);
4588 struct pp_power_state *ps;
4589 struct smu7_power_state *smu7_ps;
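	/*
	 * Apply the requested overdrive by scaling the golden top-level engine
	 * clock: new top = golden_top * (1 + value/100). The request is first
	 * clamped to +20%.
	 */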
4591 if (value > 20)
4592 value = 20;
4594 ps = hwmgr->request_ps;
4596 if (ps == NULL)
4597 return -EINVAL;
4599 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4601 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
4602 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
4603 value / 100 +
4604 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4606 return 0;
4609 static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4611 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4612 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4613 struct smu7_single_dpm_table *golden_mclk_table =
4614 &(data->golden_dpm_table.mclk_table);
4615 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
4616 int golden_value = golden_mclk_table->dpm_levels
4617 [golden_mclk_table->count - 1].value;
4619 value -= golden_value;
4620 value = DIV_ROUND_UP(value * 100, golden_value);
4622 return value;
4625 static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4627 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4628 struct smu7_single_dpm_table *golden_mclk_table =
4629 &(data->golden_dpm_table.mclk_table);
4630 struct pp_power_state *ps;
4631 struct smu7_power_state *smu7_ps;
4633 if (value > 20)
4634 value = 20;
4636 ps = hwmgr->request_ps;
4638 if (ps == NULL)
4639 return -EINVAL;
4641 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4643 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
4644 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
4645 value / 100 +
4646 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4648 return 0;
4652 static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4654 struct phm_ppt_v1_information *table_info =
4655 (struct phm_ppt_v1_information *)hwmgr->pptable;
4656 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
4657 struct phm_clock_voltage_dependency_table *sclk_table;
4658 int i;
4660 if (hwmgr->pp_table_version == PP_TABLE_V1) {
4661 if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
4662 return -EINVAL;
4663 dep_sclk_table = table_info->vdd_dep_on_sclk;
4664 for (i = 0; i < dep_sclk_table->count; i++)
4665 clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
4666 clocks->count = dep_sclk_table->count;
4667 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4668 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
4669 for (i = 0; i < sclk_table->count; i++)
4670 clocks->clock[i] = sclk_table->entries[i].clk * 10;
4671 clocks->count = sclk_table->count;
4674 return 0;
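/*
 * Latency figure reported alongside each memory clock: MCLKs between
 * MEM_FREQ_LOW_LATENCY and MEM_FREQ_HIGH_LATENCY (10 kHz units here, an
 * assumption consistent with the dependency tables) get the "high" latency
 * value, faster clocks the "low" one, and anything slower is flagged with
 * MEM_LATENCY_ERR.
 */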
4677 static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
4679 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4681 if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
4682 return data->mem_latency_high;
4683 else if (clk >= MEM_FREQ_HIGH_LATENCY)
4684 return data->mem_latency_low;
4685 else
4686 return MEM_LATENCY_ERR;
4689 static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4691 struct phm_ppt_v1_information *table_info =
4692 (struct phm_ppt_v1_information *)hwmgr->pptable;
4693 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
4694 int i;
4695 struct phm_clock_voltage_dependency_table *mclk_table;
4697 if (hwmgr->pp_table_version == PP_TABLE_V1) {
4698 if (table_info == NULL)
4699 return -EINVAL;
4700 dep_mclk_table = table_info->vdd_dep_on_mclk;
4701 for (i = 0; i < dep_mclk_table->count; i++) {
4702 clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
4703 clocks->latency[i] = smu7_get_mem_latency(hwmgr,
4704 dep_mclk_table->entries[i].clk);
4706 clocks->count = dep_mclk_table->count;
4707 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4708 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
4709 for (i = 0; i < mclk_table->count; i++)
4710 clocks->clock[i] = mclk_table->entries[i].clk * 10;
4711 clocks->count = mclk_table->count;
4713 return 0;
4716 static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
4717 struct amd_pp_clocks *clocks)
4719 switch (type) {
4720 case amd_pp_sys_clock:
4721 smu7_get_sclks(hwmgr, clocks);
4722 break;
4723 case amd_pp_mem_clock:
4724 smu7_get_mclks(hwmgr, clocks);
4725 break;
4726 default:
4727 return -EINVAL;
4730 return 0;
4733 static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4734 uint32_t virtual_addr_low,
4735 uint32_t virtual_addr_hi,
4736 uint32_t mc_addr_low,
4737 uint32_t mc_addr_hi,
4738 uint32_t size)
4740 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4742 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4743 data->soft_regs_start +
4744 smum_get_offsetof(hwmgr,
4745 SMU_SoftRegisters, DRAM_LOG_ADDR_H),
4746 mc_addr_hi);
4748 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4749 data->soft_regs_start +
4750 smum_get_offsetof(hwmgr,
4751 SMU_SoftRegisters, DRAM_LOG_ADDR_L),
4752 mc_addr_low);
4754 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4755 data->soft_regs_start +
4756 smum_get_offsetof(hwmgr,
4757 SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
4758 virtual_addr_hi);
4760 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4761 data->soft_regs_start +
4762 smum_get_offsetof(hwmgr,
4763 SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
4764 virtual_addr_low);
4766 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4767 data->soft_regs_start +
4768 smum_get_offsetof(hwmgr,
4769 SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
4770 size);
4771 return 0;
4774 static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
4775 struct amd_pp_simple_clock_info *clocks)
4777 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4778 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4779 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4781 if (clocks == NULL)
4782 return -EINVAL;
4784 clocks->memory_max_clock = mclk_table->count > 1 ?
4785 mclk_table->dpm_levels[mclk_table->count-1].value :
4786 mclk_table->dpm_levels[0].value;
4787 clocks->engine_max_clock = sclk_table->count > 1 ?
4788 sclk_table->dpm_levels[sclk_table->count-1].value :
4789 sclk_table->dpm_levels[0].value;
4790 return 0;
4793 static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4794 struct PP_TemperatureRange *thermal_data)
4796 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4797 struct phm_ppt_v1_information *table_info =
4798 (struct phm_ppt_v1_information *)hwmgr->pptable;
4800 memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
4802 if (hwmgr->pp_table_version == PP_TABLE_V1)
4803 thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
4804 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4805 else if (hwmgr->pp_table_version == PP_TABLE_V0)
4806 thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
4807 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4809 return 0;
4812 static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4813 enum PP_OD_DPM_TABLE_COMMAND type,
4814 uint32_t clk,
4815 uint32_t voltage)
4817 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4819 if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
4820 pr_info("OD voltage is out of range [%d - %d] mV\n",
4821 data->odn_dpm_table.min_vddc,
4822 data->odn_dpm_table.max_vddc);
4823 return false;
4826 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4827 if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
4828 hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
4829 pr_info("OD engine clock is out of range [%d - %d] MHz\n",
4830 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
4831 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4832 return false;
4834 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4835 if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
4836 hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
4837 pr_info("OD memory clock is out of range [%d - %d] MHz\n",
4838 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
4839 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4840 return false;
4842 } else {
4843 return false;
4846 return true;
4849 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
4850 enum PP_OD_DPM_TABLE_COMMAND type,
4851 long *input, uint32_t size)
4853 uint32_t i;
4854 struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
4855 struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
4856 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4858 uint32_t input_clk;
4859 uint32_t input_vol;
4860 uint32_t input_level;
4862 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
4863 return -EINVAL);
4865 if (!hwmgr->od_enabled) {
4866 pr_info("OverDrive feature not enabled\n");
4867 return -EINVAL;
4870 if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
4871 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
4872 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
4873 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
4874 "Failed to get ODN SCLK and Voltage tables",
4875 return -EINVAL);
4876 } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
4877 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
4878 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
4880 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
4881 "Failed to get ODN MCLK and Voltage tables",
4882 return -EINVAL);
4883 } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
4884 smu7_odn_initial_default_setting(hwmgr);
4885 return 0;
4886 } else if (PP_OD_COMMIT_DPM_TABLE == type) {
4887 smu7_check_dpm_table_updated(hwmgr);
4888 return 0;
4889 } else {
4890 return -EINVAL;
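	/*
	 * Each user entry is a (level, clock, voltage) triple: the clock is
	 * given in MHz and converted to the driver's internal 10 kHz units
	 * (hence the * 100), the voltage is in mV, and every triple is
	 * range-checked before the ODN tables are updated.
	 */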
4893 for (i = 0; i < size; i += 3) {
4894 if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
4895 			pr_info("invalid clock/voltage input\n");
4896 return 0;
4898 input_level = input[i];
4899 input_clk = input[i+1] * 100;
4900 input_vol = input[i+2];
4902 if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
4903 podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
4904 podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
4905 podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
4906 podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
4907 podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
4908 } else {
4909 return -EINVAL;
4913 return 0;
4916 static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4918 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4919 uint32_t i, size = 0;
4920 uint32_t len;
4922 static const char *profile_name[7] = {"BOOTUP_DEFAULT",
4923 "3D_FULL_SCREEN",
4924 "POWER_SAVING",
4925 "VIDEO",
4926 "VR",
4927 "COMPUTE",
4928 "CUSTOM"};
4930 static const char *title[8] = {"NUM",
4931 "MODE_NAME",
4932 "SCLK_UP_HYST",
4933 "SCLK_DOWN_HYST",
4934 "SCLK_ACTIVE_LEVEL",
4935 "MCLK_UP_HYST",
4936 "MCLK_DOWN_HYST",
4937 "MCLK_ACTIVE_LEVEL"};
4939 if (!buf)
4940 return -EINVAL;
4942 size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
4943 title[0], title[1], title[2], title[3],
4944 title[4], title[5], title[6], title[7]);
4946 len = ARRAY_SIZE(smu7_profiling);
4948 for (i = 0; i < len; i++) {
4949 if (i == hwmgr->power_profile_mode) {
4950 size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
4951 i, profile_name[i], "*",
4952 data->current_profile_setting.sclk_up_hyst,
4953 data->current_profile_setting.sclk_down_hyst,
4954 data->current_profile_setting.sclk_activity,
4955 data->current_profile_setting.mclk_up_hyst,
4956 data->current_profile_setting.mclk_down_hyst,
4957 data->current_profile_setting.mclk_activity);
4958 continue;
4960 if (smu7_profiling[i].bupdate_sclk)
4961 size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
4962 i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
4963 smu7_profiling[i].sclk_down_hyst,
4964 smu7_profiling[i].sclk_activity);
4965 else
4966 size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
4967 i, profile_name[i], "-", "-", "-");
4969 if (smu7_profiling[i].bupdate_mclk)
4970 size += sprintf(buf + size, "%16d %16d %16d\n",
4971 smu7_profiling[i].mclk_up_hyst,
4972 smu7_profiling[i].mclk_down_hyst,
4973 smu7_profiling[i].mclk_activity);
4974 else
4975 size += sprintf(buf + size, "%16s %16s %16s\n",
4976 "-", "-", "-");
4979 return size;
static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
				enum PP_SMC_POWER_PROFILE request)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t tmp, level;

	if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			level = 0;
			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				level++;

			if (level > 0)
				smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
		}
	} else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
		smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
	}
}
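
/* Select a power profile mode. The requested mode is passed as the last
 * element of @input; for the CUSTOM mode, @input can also carry the eight
 * values that define the profile (update flag, up/down hysteresis and
 * activity level, for both sclk and mclk).
 */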
static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct profile_mode_setting tmp;
	enum PP_SMC_POWER_PROFILE mode;

	if (input == NULL)
		return -EINVAL;

	mode = input[size];
	switch (mode) {
	case PP_SMC_POWER_PROFILE_CUSTOM:
		if (size < 8 && size != 0)
			return -EINVAL;
		/* If only CUSTOM is passed in, use the saved values. Check
		 * that we actually have a CUSTOM profile by ensuring that
		 * the "use sclk" or the "use mclk" bits are set
		 */
		tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
		if (size == 0) {
			if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
				return -EINVAL;
		} else {
			tmp.bupdate_sclk = input[0];
			tmp.sclk_up_hyst = input[1];
			tmp.sclk_down_hyst = input[2];
			tmp.sclk_activity = input[3];
			tmp.bupdate_mclk = input[4];
			tmp.mclk_up_hyst = input[5];
			tmp.mclk_down_hyst = input[6];
			tmp.mclk_activity = input[7];
			smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
		}
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
			hwmgr->power_profile_mode = mode;
		}
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
	case PP_SMC_POWER_PROFILE_POWERSAVING:
	case PP_SMC_POWER_PROFILE_VIDEO:
	case PP_SMC_POWER_PROFILE_VR:
	case PP_SMC_POWER_PROFILE_COMPUTE:
		if (mode == hwmgr->power_profile_mode)
			return 0;

		memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			if (tmp.bupdate_sclk) {
				data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
				data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
				data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
				data->current_profile_setting.sclk_activity = tmp.sclk_activity;
			}
			if (tmp.bupdate_mclk) {
				data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
				data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
				data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
				data->current_profile_setting.mclk_activity = tmp.mclk_activity;
			}
			smu7_patch_compute_profile_mode(hwmgr, mode);
			hwmgr->power_profile_mode = mode;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
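
/* Report the engine and memory clock of the performance level at @index,
 * clamped to the last level present in the power state.
 */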
static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	const struct smu7_power_state *ps;
	uint32_t i;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	ps = cast_const_phw_smu7_power_state(state);

	i = index > ps->performance_level_count - 1 ?
			ps->performance_level_count - 1 : index;

	level->coreClock = ps->performance_levels[i].engine_clock;
	level->memory_clock = ps->performance_levels[i].memory_clock;

	return 0;
}
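
/* Tear down DPM before the ASIC is powered off. */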
static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
{
	int result;

	result = smu7_disable_dpm_tasks(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
			"[disable_dpm_tasks] Failed to disable DPM!",
			);

	return result;
}
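
/* hwmgr callback table wired up by smu7_init_function_pointers() below. */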
static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
	.backend_init = &smu7_hwmgr_backend_init,
	.backend_fini = &smu7_hwmgr_backend_fini,
	.asic_setup = &smu7_setup_asic_task,
	.dynamic_state_management_enable = &smu7_enable_dpm_tasks,
	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
	.force_dpm_level = &smu7_force_dpm_level,
	.power_state_set = smu7_set_power_state_tasks,
	.get_power_state_size = smu7_get_power_state_size,
	.get_mclk = smu7_dpm_get_mclk,
	.get_sclk = smu7_dpm_get_sclk,
	.patch_boot_state = smu7_dpm_patch_boot_state,
	.get_pp_table_entry = smu7_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
	.powerdown_uvd = smu7_powerdown_uvd,
	.powergate_uvd = smu7_powergate_uvd,
	.powergate_vce = smu7_powergate_vce,
	.disable_clock_power_gating = smu7_disable_clock_power_gating,
	.update_clock_gatings = smu7_update_clock_gatings,
	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = smu7_display_configuration_changed_task,
	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
	.register_irq_handlers = smu7_register_irq_handlers,
	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
	.check_states_equal = smu7_check_states_equal,
	.set_fan_control_mode = smu7_set_fan_control_mode,
	.get_fan_control_mode = smu7_get_fan_control_mode,
	.force_clock_level = smu7_force_clock_level,
	.print_clock_levels = smu7_print_clock_levels,
	.powergate_gfx = smu7_powergate_gfx,
	.get_sclk_od = smu7_get_sclk_od,
	.set_sclk_od = smu7_set_sclk_od,
	.get_mclk_od = smu7_get_mclk_od,
	.set_mclk_od = smu7_set_mclk_od,
	.get_clock_by_type = smu7_get_clock_by_type,
	.read_sensor = smu7_read_sensor,
	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
	.avfs_control = smu7_avfs_control,
	.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
	.start_thermal_controller = smu7_start_thermal_controller,
	.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
	.get_max_high_clocks = smu7_get_max_high_clocks,
	.get_thermal_temperature_range = smu7_get_thermal_temperature_range,
	.odn_edit_dpm_table = smu7_odn_edit_dpm_table,
	.set_power_limit = smu7_set_power_limit,
	.get_power_profile_mode = smu7_get_power_profile_mode,
	.set_power_profile_mode = smu7_set_power_profile_mode,
	.get_performance_level = smu7_get_performance_level,
	.get_asic_baco_capability = smu7_baco_get_capability,
	.get_asic_baco_state = smu7_baco_get_state,
	.set_asic_baco_state = smu7_baco_set_state,
	.power_off_asic = smu7_power_off_asic,
};
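
/* Return the largest deep-sleep divider id for which @clock, right-shifted by
 * that id, still meets the minimum engine clock required for stutter mode.
 */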
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
		uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);

	PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}
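
/* Hook up the common SMU7 hwmgr callbacks and pick the powerplay table
 * parser that matches the ASIC's pptable version.
 */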
int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
	if (hwmgr->pp_table_version == PP_TABLE_V0)
		hwmgr->pptable_func = &pptable_funcs;
	else if (hwmgr->pp_table_version == PP_TABLE_V1)
		hwmgr->pptable_func = &pptable_v1_0_funcs;

	return 0;
}