2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
25 #include <linux/seq_file.h>
28 #include "amdgpu_pm.h"
29 #include "amdgpu_atombios.h"
32 #include "amdgpu_dpm.h"
37 #include "smu/smu_8_0_d.h"
38 #include "smu/smu_8_0_sh_mask.h"
39 #include "gca/gfx_8_0_d.h"
40 #include "gca/gfx_8_0_sh_mask.h"
41 #include "gmc/gmc_8_1_d.h"
42 #include "bif/bif_5_1_d.h"
45 static void cz_dpm_powergate_uvd(struct amdgpu_device
*adev
, bool gate
);
46 static void cz_dpm_powergate_vce(struct amdgpu_device
*adev
, bool gate
);
48 static struct cz_ps
*cz_get_ps(struct amdgpu_ps
*rps
)
50 struct cz_ps
*ps
= rps
->ps_priv
;
55 static struct cz_power_info
*cz_get_pi(struct amdgpu_device
*adev
)
57 struct cz_power_info
*pi
= adev
->pm
.dpm
.priv
;
62 static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device
*adev
,
65 uint16_t tmp
= 6200 - voltage
* 25;
70 static void cz_construct_max_power_limits_table(struct amdgpu_device
*adev
,
71 struct amdgpu_clock_and_voltage_limits
*table
)
73 struct cz_power_info
*pi
= cz_get_pi(adev
);
74 struct amdgpu_clock_voltage_dependency_table
*dep_table
=
75 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
77 if (dep_table
->count
> 0) {
78 table
->sclk
= dep_table
->entries
[dep_table
->count
- 1].clk
;
79 table
->vddc
= cz_convert_8bit_index_to_voltage(adev
,
80 dep_table
->entries
[dep_table
->count
- 1].v
);
83 table
->mclk
= pi
->sys_info
.nbp_memory_clock
[0];
88 struct _ATOM_INTEGRATED_SYSTEM_INFO info
;
89 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7
;
90 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8
;
91 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9
;
94 static int cz_parse_sys_info_table(struct amdgpu_device
*adev
)
96 struct cz_power_info
*pi
= cz_get_pi(adev
);
97 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
98 int index
= GetIndexIntoMasterTable(DATA
, IntegratedSystemInfo
);
99 union igp_info
*igp_info
;
104 if (amdgpu_atom_parse_data_header(mode_info
->atom_context
, index
, NULL
,
105 &frev
, &crev
, &data_offset
)) {
106 igp_info
= (union igp_info
*)(mode_info
->atom_context
->bios
+
110 DRM_ERROR("Unsupported IGP table: %d %d\n", frev
, crev
);
113 pi
->sys_info
.bootup_sclk
=
114 le32_to_cpu(igp_info
->info_9
.ulBootUpEngineClock
);
115 pi
->sys_info
.bootup_uma_clk
=
116 le32_to_cpu(igp_info
->info_9
.ulBootUpUMAClock
);
117 pi
->sys_info
.dentist_vco_freq
=
118 le32_to_cpu(igp_info
->info_9
.ulDentistVCOFreq
);
119 pi
->sys_info
.bootup_nb_voltage_index
=
120 le16_to_cpu(igp_info
->info_9
.usBootUpNBVoltage
);
122 if (igp_info
->info_9
.ucHtcTmpLmt
== 0)
123 pi
->sys_info
.htc_tmp_lmt
= 203;
125 pi
->sys_info
.htc_tmp_lmt
= igp_info
->info_9
.ucHtcTmpLmt
;
127 if (igp_info
->info_9
.ucHtcHystLmt
== 0)
128 pi
->sys_info
.htc_hyst_lmt
= 5;
130 pi
->sys_info
.htc_hyst_lmt
= igp_info
->info_9
.ucHtcHystLmt
;
132 if (pi
->sys_info
.htc_tmp_lmt
<= pi
->sys_info
.htc_hyst_lmt
) {
133 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
137 if (le32_to_cpu(igp_info
->info_9
.ulSystemConfig
) & (1 << 3) &&
138 pi
->enable_nb_ps_policy
)
139 pi
->sys_info
.nb_dpm_enable
= true;
141 pi
->sys_info
.nb_dpm_enable
= false;
143 for (i
= 0; i
< CZ_NUM_NBPSTATES
; i
++) {
144 if (i
< CZ_NUM_NBPMEMORY_CLOCK
)
145 pi
->sys_info
.nbp_memory_clock
[i
] =
146 le32_to_cpu(igp_info
->info_9
.ulNbpStateMemclkFreq
[i
]);
147 pi
->sys_info
.nbp_n_clock
[i
] =
148 le32_to_cpu(igp_info
->info_9
.ulNbpStateNClkFreq
[i
]);
151 for (i
= 0; i
< CZ_MAX_DISPLAY_CLOCK_LEVEL
; i
++)
152 pi
->sys_info
.display_clock
[i
] =
153 le32_to_cpu(igp_info
->info_9
.sDispClkVoltageMapping
[i
].ulMaximumSupportedCLK
);
155 for (i
= 0; i
< CZ_NUM_NBPSTATES
; i
++)
156 pi
->sys_info
.nbp_voltage_index
[i
] =
157 le32_to_cpu(igp_info
->info_9
.usNBPStateVoltage
[i
]);
159 if (le32_to_cpu(igp_info
->info_9
.ulGPUCapInfo
) &
160 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS
)
161 pi
->caps_enable_dfs_bypass
= true;
163 pi
->sys_info
.uma_channel_number
=
164 igp_info
->info_9
.ucUMAChannelNumber
;
166 cz_construct_max_power_limits_table(adev
,
167 &adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
);
173 static void cz_patch_voltage_values(struct amdgpu_device
*adev
)
176 struct amdgpu_uvd_clock_voltage_dependency_table
*uvd_table
=
177 &adev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
178 struct amdgpu_vce_clock_voltage_dependency_table
*vce_table
=
179 &adev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
180 struct amdgpu_clock_voltage_dependency_table
*acp_table
=
181 &adev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
183 if (uvd_table
->count
) {
184 for (i
= 0; i
< uvd_table
->count
; i
++)
185 uvd_table
->entries
[i
].v
=
186 cz_convert_8bit_index_to_voltage(adev
,
187 uvd_table
->entries
[i
].v
);
190 if (vce_table
->count
) {
191 for (i
= 0; i
< vce_table
->count
; i
++)
192 vce_table
->entries
[i
].v
=
193 cz_convert_8bit_index_to_voltage(adev
,
194 vce_table
->entries
[i
].v
);
197 if (acp_table
->count
) {
198 for (i
= 0; i
< acp_table
->count
; i
++)
199 acp_table
->entries
[i
].v
=
200 cz_convert_8bit_index_to_voltage(adev
,
201 acp_table
->entries
[i
].v
);
206 static void cz_construct_boot_state(struct amdgpu_device
*adev
)
208 struct cz_power_info
*pi
= cz_get_pi(adev
);
210 pi
->boot_pl
.sclk
= pi
->sys_info
.bootup_sclk
;
211 pi
->boot_pl
.vddc_index
= pi
->sys_info
.bootup_nb_voltage_index
;
212 pi
->boot_pl
.ds_divider_index
= 0;
213 pi
->boot_pl
.ss_divider_index
= 0;
214 pi
->boot_pl
.allow_gnb_slow
= 1;
215 pi
->boot_pl
.force_nbp_state
= 0;
216 pi
->boot_pl
.display_wm
= 0;
217 pi
->boot_pl
.vce_wm
= 0;
221 static void cz_patch_boot_state(struct amdgpu_device
*adev
,
224 struct cz_power_info
*pi
= cz_get_pi(adev
);
227 ps
->levels
[0] = pi
->boot_pl
;
/* Raw per-level clock info from the ATOM PPLib table; the layout depends
 * on the ASIC family — only the carrizo view is used by this driver. */
union pplib_clock_info {
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo;
};
236 static void cz_parse_pplib_clock_info(struct amdgpu_device
*adev
,
237 struct amdgpu_ps
*rps
, int index
,
238 union pplib_clock_info
*clock_info
)
240 struct cz_power_info
*pi
= cz_get_pi(adev
);
241 struct cz_ps
*ps
= cz_get_ps(rps
);
242 struct cz_pl
*pl
= &ps
->levels
[index
];
243 struct amdgpu_clock_voltage_dependency_table
*table
=
244 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
246 pl
->sclk
= table
->entries
[clock_info
->carrizo
.index
].clk
;
247 pl
->vddc_index
= table
->entries
[clock_info
->carrizo
.index
].v
;
249 ps
->num_levels
= index
+ 1;
251 if (pi
->caps_sclk_ds
) {
252 pl
->ds_divider_index
= 5;
253 pl
->ss_divider_index
= 5;
258 static void cz_parse_pplib_non_clock_info(struct amdgpu_device
*adev
,
259 struct amdgpu_ps
*rps
,
260 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
,
263 struct cz_ps
*ps
= cz_get_ps(rps
);
265 rps
->caps
= le32_to_cpu(non_clock_info
->ulCapsAndSettings
);
266 rps
->class = le16_to_cpu(non_clock_info
->usClassification
);
267 rps
->class2
= le16_to_cpu(non_clock_info
->usClassification2
);
269 if (ATOM_PPLIB_NONCLOCKINFO_VER1
< table_rev
) {
270 rps
->vclk
= le32_to_cpu(non_clock_info
->ulVCLK
);
271 rps
->dclk
= le32_to_cpu(non_clock_info
->ulDCLK
);
277 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_BOOT
) {
278 adev
->pm
.dpm
.boot_ps
= rps
;
279 cz_patch_boot_state(adev
, ps
);
281 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE
)
282 adev
->pm
.dpm
.uvd_ps
= rps
;
287 struct _ATOM_PPLIB_POWERPLAYTABLE pplib
;
288 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2
;
289 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3
;
290 struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4
;
291 struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5
;
/* One entry of the PPLib state array; the v2 layout is the one consumed
 * by cz_parse_power_table(). */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
299 static int cz_parse_power_table(struct amdgpu_device
*adev
)
301 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
302 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
;
303 union pplib_power_state
*power_state
;
304 int i
, j
, k
, non_clock_array_index
, clock_array_index
;
305 union pplib_clock_info
*clock_info
;
306 struct _StateArray
*state_array
;
307 struct _ClockInfoArray
*clock_info_array
;
308 struct _NonClockInfoArray
*non_clock_info_array
;
309 union power_info
*power_info
;
310 int index
= GetIndexIntoMasterTable(DATA
, PowerPlayInfo
);
313 u8
*power_state_offset
;
316 if (!amdgpu_atom_parse_data_header(mode_info
->atom_context
, index
, NULL
,
317 &frev
, &crev
, &data_offset
))
319 power_info
= (union power_info
*)(mode_info
->atom_context
->bios
+ data_offset
);
321 state_array
= (struct _StateArray
*)
322 (mode_info
->atom_context
->bios
+ data_offset
+
323 le16_to_cpu(power_info
->pplib
.usStateArrayOffset
));
324 clock_info_array
= (struct _ClockInfoArray
*)
325 (mode_info
->atom_context
->bios
+ data_offset
+
326 le16_to_cpu(power_info
->pplib
.usClockInfoArrayOffset
));
327 non_clock_info_array
= (struct _NonClockInfoArray
*)
328 (mode_info
->atom_context
->bios
+ data_offset
+
329 le16_to_cpu(power_info
->pplib
.usNonClockInfoArrayOffset
));
331 adev
->pm
.dpm
.ps
= kzalloc(sizeof(struct amdgpu_ps
) *
332 state_array
->ucNumEntries
, GFP_KERNEL
);
334 if (!adev
->pm
.dpm
.ps
)
337 power_state_offset
= (u8
*)state_array
->states
;
338 adev
->pm
.dpm
.platform_caps
=
339 le32_to_cpu(power_info
->pplib
.ulPlatformCaps
);
340 adev
->pm
.dpm
.backbias_response_time
=
341 le16_to_cpu(power_info
->pplib
.usBackbiasTime
);
342 adev
->pm
.dpm
.voltage_response_time
=
343 le16_to_cpu(power_info
->pplib
.usVoltageTime
);
345 for (i
= 0; i
< state_array
->ucNumEntries
; i
++) {
346 power_state
= (union pplib_power_state
*)power_state_offset
;
347 non_clock_array_index
= power_state
->v2
.nonClockInfoIndex
;
348 non_clock_info
= (struct _ATOM_PPLIB_NONCLOCK_INFO
*)
349 &non_clock_info_array
->nonClockInfo
[non_clock_array_index
];
351 ps
= kzalloc(sizeof(struct cz_ps
), GFP_KERNEL
);
353 kfree(adev
->pm
.dpm
.ps
);
357 adev
->pm
.dpm
.ps
[i
].ps_priv
= ps
;
359 for (j
= 0; j
< power_state
->v2
.ucNumDPMLevels
; j
++) {
360 clock_array_index
= power_state
->v2
.clockInfoIndex
[j
];
361 if (clock_array_index
>= clock_info_array
->ucNumEntries
)
363 if (k
>= CZ_MAX_HARDWARE_POWERLEVELS
)
365 clock_info
= (union pplib_clock_info
*)
366 &clock_info_array
->clockInfo
[clock_array_index
*
367 clock_info_array
->ucEntrySize
];
368 cz_parse_pplib_clock_info(adev
, &adev
->pm
.dpm
.ps
[i
],
372 cz_parse_pplib_non_clock_info(adev
, &adev
->pm
.dpm
.ps
[i
],
374 non_clock_info_array
->ucEntrySize
);
375 power_state_offset
+= 2 + power_state
->v2
.ucNumDPMLevels
;
377 adev
->pm
.dpm
.num_ps
= state_array
->ucNumEntries
;
382 static int cz_process_firmware_header(struct amdgpu_device
*adev
)
384 struct cz_power_info
*pi
= cz_get_pi(adev
);
388 ret
= cz_read_smc_sram_dword(adev
, SMU8_FIRMWARE_HEADER_LOCATION
+
389 offsetof(struct SMU8_Firmware_Header
,
394 pi
->dpm_table_start
= tmp
;
399 static int cz_dpm_init(struct amdgpu_device
*adev
)
401 struct cz_power_info
*pi
;
404 pi
= kzalloc(sizeof(struct cz_power_info
), GFP_KERNEL
);
408 adev
->pm
.dpm
.priv
= pi
;
410 ret
= amdgpu_get_platform_caps(adev
);
414 ret
= amdgpu_parse_extended_power_table(adev
);
418 pi
->sram_end
= SMC_RAM_END
;
420 /* set up DPM defaults */
421 for (i
= 0; i
< CZ_MAX_HARDWARE_POWERLEVELS
; i
++)
422 pi
->active_target
[i
] = CZ_AT_DFLT
;
424 pi
->mgcg_cgtt_local0
= 0x0;
425 pi
->mgcg_cgtt_local1
= 0x0;
426 pi
->clock_slow_down_step
= 25000;
427 pi
->skip_clock_slow_down
= 1;
428 pi
->enable_nb_ps_policy
= 0;
429 pi
->caps_power_containment
= true;
431 pi
->didt_enabled
= false;
432 if (pi
->didt_enabled
) {
433 pi
->caps_sq_ramping
= true;
434 pi
->caps_db_ramping
= true;
435 pi
->caps_td_ramping
= true;
436 pi
->caps_tcp_ramping
= true;
438 pi
->caps_sclk_ds
= true;
439 pi
->voting_clients
= 0x00c00033;
440 pi
->auto_thermal_throttling_enabled
= true;
441 pi
->bapm_enabled
= false;
442 pi
->disable_nb_ps3_in_battery
= false;
443 pi
->voltage_drop_threshold
= 0;
444 pi
->caps_sclk_throttle_low_notification
= false;
445 pi
->gfx_pg_threshold
= 500;
448 pi
->caps_uvd_pg
= (adev
->pg_flags
& AMDGPU_PG_SUPPORT_UVD
) ? true : false;
449 pi
->caps_uvd_dpm
= true;
451 pi
->caps_vce_pg
= (adev
->pg_flags
& AMDGPU_PG_SUPPORT_VCE
) ? true : false;
452 pi
->caps_vce_dpm
= true;
454 pi
->caps_acp_pg
= (adev
->pg_flags
& AMDGPU_PG_SUPPORT_ACP
) ? true : false;
455 pi
->caps_acp_dpm
= true;
457 pi
->caps_stable_power_state
= false;
458 pi
->nb_dpm_enabled_by_driver
= true;
459 pi
->nb_dpm_enabled
= false;
460 pi
->caps_voltage_island
= false;
461 /* flags which indicate need to upload pptable */
462 pi
->need_pptable_upload
= true;
464 ret
= cz_parse_sys_info_table(adev
);
468 cz_patch_voltage_values(adev
);
469 cz_construct_boot_state(adev
);
471 ret
= cz_parse_power_table(adev
);
475 ret
= cz_process_firmware_header(adev
);
479 pi
->dpm_enabled
= true;
480 pi
->uvd_dynamic_pg
= false;
485 static void cz_dpm_fini(struct amdgpu_device
*adev
)
489 for (i
= 0; i
< adev
->pm
.dpm
.num_ps
; i
++)
490 kfree(adev
->pm
.dpm
.ps
[i
].ps_priv
);
492 kfree(adev
->pm
.dpm
.ps
);
493 kfree(adev
->pm
.dpm
.priv
);
494 amdgpu_free_extended_power_table(adev
);
497 #define ixSMUSVI_NB_CURRENTVID 0xD8230044
498 #define CURRENT_NB_VID_MASK 0xff000000
499 #define CURRENT_NB_VID__SHIFT 24
500 #define ixSMUSVI_GFX_CURRENTVID 0xD8230048
501 #define CURRENT_GFX_VID_MASK 0xff000000
502 #define CURRENT_GFX_VID__SHIFT 24
505 cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device
*adev
,
508 struct cz_power_info
*pi
= cz_get_pi(adev
);
509 struct amdgpu_clock_voltage_dependency_table
*table
=
510 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
511 struct amdgpu_uvd_clock_voltage_dependency_table
*uvd_table
=
512 &adev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
513 struct amdgpu_vce_clock_voltage_dependency_table
*vce_table
=
514 &adev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
515 u32 sclk_index
= REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX
),
516 TARGET_AND_CURRENT_PROFILE_INDEX
, CURR_SCLK_INDEX
);
517 u32 uvd_index
= REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2
),
518 TARGET_AND_CURRENT_PROFILE_INDEX_2
, CURR_UVD_INDEX
);
519 u32 vce_index
= REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2
),
520 TARGET_AND_CURRENT_PROFILE_INDEX_2
, CURR_VCE_INDEX
);
521 u32 sclk
, vclk
, dclk
, ecclk
, tmp
;
524 if (sclk_index
>= NUM_SCLK_LEVELS
) {
525 seq_printf(m
, "invalid sclk dpm profile %d\n", sclk_index
);
527 sclk
= table
->entries
[sclk_index
].clk
;
528 seq_printf(m
, "%u sclk: %u\n", sclk_index
, sclk
);
531 tmp
= (RREG32_SMC(ixSMUSVI_NB_CURRENTVID
) &
532 CURRENT_NB_VID_MASK
) >> CURRENT_NB_VID__SHIFT
;
533 vddnb
= cz_convert_8bit_index_to_voltage(adev
, (u16
)tmp
);
534 tmp
= (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID
) &
535 CURRENT_GFX_VID_MASK
) >> CURRENT_GFX_VID__SHIFT
;
536 vddgfx
= cz_convert_8bit_index_to_voltage(adev
, (u16
)tmp
);
537 seq_printf(m
, "vddnb: %u vddgfx: %u\n", vddnb
, vddgfx
);
539 seq_printf(m
, "uvd %sabled\n", pi
->uvd_power_gated
? "dis" : "en");
540 if (!pi
->uvd_power_gated
) {
541 if (uvd_index
>= CZ_MAX_HARDWARE_POWERLEVELS
) {
542 seq_printf(m
, "invalid uvd dpm level %d\n", uvd_index
);
544 vclk
= uvd_table
->entries
[uvd_index
].vclk
;
545 dclk
= uvd_table
->entries
[uvd_index
].dclk
;
546 seq_printf(m
, "%u uvd vclk: %u dclk: %u\n", uvd_index
, vclk
, dclk
);
550 seq_printf(m
, "vce %sabled\n", pi
->vce_power_gated
? "dis" : "en");
551 if (!pi
->vce_power_gated
) {
552 if (vce_index
>= CZ_MAX_HARDWARE_POWERLEVELS
) {
553 seq_printf(m
, "invalid vce dpm level %d\n", vce_index
);
555 ecclk
= vce_table
->entries
[vce_index
].ecclk
;
556 seq_printf(m
, "%u vce ecclk: %u\n", vce_index
, ecclk
);
561 static void cz_dpm_print_power_state(struct amdgpu_device
*adev
,
562 struct amdgpu_ps
*rps
)
565 struct cz_ps
*ps
= cz_get_ps(rps
);
567 amdgpu_dpm_print_class_info(rps
->class, rps
->class2
);
568 amdgpu_dpm_print_cap_info(rps
->caps
);
570 DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps
->vclk
, rps
->dclk
);
571 for (i
= 0; i
< ps
->num_levels
; i
++) {
572 struct cz_pl
*pl
= &ps
->levels
[i
];
574 DRM_INFO("\t\tpower level %d sclk: %u vddc: %u\n",
576 cz_convert_8bit_index_to_voltage(adev
, pl
->vddc_index
));
579 amdgpu_dpm_print_ps_status(adev
, rps
);
582 static void cz_dpm_set_funcs(struct amdgpu_device
*adev
);
/* IP-block early init: wire up the CZ DPM function table. */
static int cz_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cz_dpm_set_funcs(adev);

	return 0;
}
594 static int cz_dpm_late_init(void *handle
)
596 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
599 /* powerdown unused blocks for now */
600 cz_dpm_powergate_uvd(adev
, true);
601 cz_dpm_powergate_vce(adev
, true);
607 static int cz_dpm_sw_init(void *handle
)
609 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
611 /* fix me to add thermal support TODO */
613 /* default to balanced state */
614 adev
->pm
.dpm
.state
= POWER_STATE_TYPE_BALANCED
;
615 adev
->pm
.dpm
.user_state
= POWER_STATE_TYPE_BALANCED
;
616 adev
->pm
.dpm
.forced_level
= AMDGPU_DPM_FORCED_LEVEL_AUTO
;
617 adev
->pm
.default_sclk
= adev
->clock
.default_sclk
;
618 adev
->pm
.default_mclk
= adev
->clock
.default_mclk
;
619 adev
->pm
.current_sclk
= adev
->clock
.default_sclk
;
620 adev
->pm
.current_mclk
= adev
->clock
.default_mclk
;
621 adev
->pm
.int_thermal_type
= THERMAL_TYPE_NONE
;
626 mutex_lock(&adev
->pm
.mutex
);
627 ret
= cz_dpm_init(adev
);
629 goto dpm_init_failed
;
631 adev
->pm
.dpm
.current_ps
= adev
->pm
.dpm
.requested_ps
= adev
->pm
.dpm
.boot_ps
;
633 amdgpu_pm_print_power_states(adev
);
635 ret
= amdgpu_pm_sysfs_init(adev
);
637 goto dpm_init_failed
;
639 mutex_unlock(&adev
->pm
.mutex
);
640 DRM_INFO("amdgpu: dpm initialized\n");
646 mutex_unlock(&adev
->pm
.mutex
);
647 DRM_ERROR("amdgpu: dpm initialization failed\n");
652 static int cz_dpm_sw_fini(void *handle
)
654 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
656 mutex_lock(&adev
->pm
.mutex
);
657 amdgpu_pm_sysfs_fini(adev
);
659 mutex_unlock(&adev
->pm
.mutex
);
664 static void cz_reset_ap_mask(struct amdgpu_device
*adev
)
666 struct cz_power_info
*pi
= cz_get_pi(adev
);
668 pi
->active_process_mask
= 0;
/* Thin wrapper: fetch the power play table pointer from the SMU. */
static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
					    void **table)
{
	return cz_smu_download_pptable(adev, table);
}
682 static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device
*adev
)
684 struct cz_power_info
*pi
= cz_get_pi(adev
);
685 struct SMU8_Fusion_ClkTable
*clock_table
;
686 struct atom_clock_dividers dividers
;
691 struct amdgpu_clock_voltage_dependency_table
*vddc_table
=
692 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
693 struct amdgpu_clock_voltage_dependency_table
*vddgfx_table
=
694 &adev
->pm
.dpm
.dyn_state
.vddgfx_dependency_on_sclk
;
695 struct amdgpu_uvd_clock_voltage_dependency_table
*uvd_table
=
696 &adev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
697 struct amdgpu_vce_clock_voltage_dependency_table
*vce_table
=
698 &adev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
699 struct amdgpu_clock_voltage_dependency_table
*acp_table
=
700 &adev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
702 if (!pi
->need_pptable_upload
)
705 ret
= cz_dpm_download_pptable_from_smu(adev
, &table
);
707 DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n");
711 clock_table
= (struct SMU8_Fusion_ClkTable
*)table
;
712 /* patch clock table */
713 if (vddc_table
->count
> CZ_MAX_HARDWARE_POWERLEVELS
||
714 vddgfx_table
->count
> CZ_MAX_HARDWARE_POWERLEVELS
||
715 uvd_table
->count
> CZ_MAX_HARDWARE_POWERLEVELS
||
716 vce_table
->count
> CZ_MAX_HARDWARE_POWERLEVELS
||
717 acp_table
->count
> CZ_MAX_HARDWARE_POWERLEVELS
) {
718 DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n");
722 for (i
= 0; i
< CZ_MAX_HARDWARE_POWERLEVELS
; i
++) {
725 clock_table
->SclkBreakdownTable
.ClkLevel
[i
].GnbVid
=
726 (i
< vddc_table
->count
) ? (uint8_t)vddc_table
->entries
[i
].v
: 0;
727 clock_table
->SclkBreakdownTable
.ClkLevel
[i
].Frequency
=
728 (i
< vddc_table
->count
) ? vddc_table
->entries
[i
].clk
: 0;
729 ret
= amdgpu_atombios_get_clock_dividers(adev
, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK
,
730 clock_table
->SclkBreakdownTable
.ClkLevel
[i
].Frequency
,
734 clock_table
->SclkBreakdownTable
.ClkLevel
[i
].DfsDid
=
735 (uint8_t)dividers
.post_divider
;
738 clock_table
->SclkBreakdownTable
.ClkLevel
[i
].GfxVid
=
739 (i
< vddgfx_table
->count
) ? (uint8_t)vddgfx_table
->entries
[i
].v
: 0;
742 clock_table
->AclkBreakdownTable
.ClkLevel
[i
].GfxVid
=
743 (i
< acp_table
->count
) ? (uint8_t)acp_table
->entries
[i
].v
: 0;
744 clock_table
->AclkBreakdownTable
.ClkLevel
[i
].Frequency
=
745 (i
< acp_table
->count
) ? acp_table
->entries
[i
].clk
: 0;
746 ret
= amdgpu_atombios_get_clock_dividers(adev
, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK
,
747 clock_table
->SclkBreakdownTable
.ClkLevel
[i
].Frequency
,
751 clock_table
->AclkBreakdownTable
.ClkLevel
[i
].DfsDid
=
752 (uint8_t)dividers
.post_divider
;
755 clock_table
->VclkBreakdownTable
.ClkLevel
[i
].GfxVid
=
756 (i
< uvd_table
->count
) ? (uint8_t)uvd_table
->entries
[i
].v
: 0;
757 clock_table
->VclkBreakdownTable
.ClkLevel
[i
].Frequency
=
758 (i
< uvd_table
->count
) ? uvd_table
->entries
[i
].vclk
: 0;
759 ret
= amdgpu_atombios_get_clock_dividers(adev
, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK
,
760 clock_table
->VclkBreakdownTable
.ClkLevel
[i
].Frequency
,
764 clock_table
->VclkBreakdownTable
.ClkLevel
[i
].DfsDid
=
765 (uint8_t)dividers
.post_divider
;
767 clock_table
->DclkBreakdownTable
.ClkLevel
[i
].GfxVid
=
768 (i
< uvd_table
->count
) ? (uint8_t)uvd_table
->entries
[i
].v
: 0;
769 clock_table
->DclkBreakdownTable
.ClkLevel
[i
].Frequency
=
770 (i
< uvd_table
->count
) ? uvd_table
->entries
[i
].dclk
: 0;
771 ret
= amdgpu_atombios_get_clock_dividers(adev
, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK
,
772 clock_table
->DclkBreakdownTable
.ClkLevel
[i
].Frequency
,
776 clock_table
->DclkBreakdownTable
.ClkLevel
[i
].DfsDid
=
777 (uint8_t)dividers
.post_divider
;
780 clock_table
->EclkBreakdownTable
.ClkLevel
[i
].GfxVid
=
781 (i
< vce_table
->count
) ? (uint8_t)vce_table
->entries
[i
].v
: 0;
782 clock_table
->EclkBreakdownTable
.ClkLevel
[i
].Frequency
=
783 (i
< vce_table
->count
) ? vce_table
->entries
[i
].ecclk
: 0;
784 ret
= amdgpu_atombios_get_clock_dividers(adev
, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK
,
785 clock_table
->EclkBreakdownTable
.ClkLevel
[i
].Frequency
,
789 clock_table
->EclkBreakdownTable
.ClkLevel
[i
].DfsDid
=
790 (uint8_t)dividers
.post_divider
;
793 /* its time to upload to SMU */
794 ret
= cz_smu_upload_pptable(adev
);
796 DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n");
803 static void cz_init_sclk_limit(struct amdgpu_device
*adev
)
805 struct cz_power_info
*pi
= cz_get_pi(adev
);
806 struct amdgpu_clock_voltage_dependency_table
*table
=
807 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
808 uint32_t clock
= 0, level
;
810 if (!table
|| !table
->count
) {
811 DRM_ERROR("Invalid Voltage Dependency table.\n");
815 pi
->sclk_dpm
.soft_min_clk
= 0;
816 pi
->sclk_dpm
.hard_min_clk
= 0;
817 cz_send_msg_to_smc(adev
, PPSMC_MSG_GetMaxSclkLevel
);
818 level
= cz_get_argument(adev
);
819 if (level
< table
->count
)
820 clock
= table
->entries
[level
].clk
;
822 DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
823 clock
= table
->entries
[table
->count
- 1].clk
;
826 pi
->sclk_dpm
.soft_max_clk
= clock
;
827 pi
->sclk_dpm
.hard_max_clk
= clock
;
831 static void cz_init_uvd_limit(struct amdgpu_device
*adev
)
833 struct cz_power_info
*pi
= cz_get_pi(adev
);
834 struct amdgpu_uvd_clock_voltage_dependency_table
*table
=
835 &adev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
836 uint32_t clock
= 0, level
;
838 if (!table
|| !table
->count
) {
839 DRM_ERROR("Invalid Voltage Dependency table.\n");
843 pi
->uvd_dpm
.soft_min_clk
= 0;
844 pi
->uvd_dpm
.hard_min_clk
= 0;
845 cz_send_msg_to_smc(adev
, PPSMC_MSG_GetMaxUvdLevel
);
846 level
= cz_get_argument(adev
);
847 if (level
< table
->count
)
848 clock
= table
->entries
[level
].vclk
;
850 DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
851 clock
= table
->entries
[table
->count
- 1].vclk
;
854 pi
->uvd_dpm
.soft_max_clk
= clock
;
855 pi
->uvd_dpm
.hard_max_clk
= clock
;
859 static void cz_init_vce_limit(struct amdgpu_device
*adev
)
861 struct cz_power_info
*pi
= cz_get_pi(adev
);
862 struct amdgpu_vce_clock_voltage_dependency_table
*table
=
863 &adev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
864 uint32_t clock
= 0, level
;
866 if (!table
|| !table
->count
) {
867 DRM_ERROR("Invalid Voltage Dependency table.\n");
871 pi
->vce_dpm
.soft_min_clk
= table
->entries
[0].ecclk
;
872 pi
->vce_dpm
.hard_min_clk
= table
->entries
[0].ecclk
;
873 cz_send_msg_to_smc(adev
, PPSMC_MSG_GetMaxEclkLevel
);
874 level
= cz_get_argument(adev
);
875 if (level
< table
->count
)
876 clock
= table
->entries
[level
].ecclk
;
878 /* future BIOS would fix this error */
879 DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
880 clock
= table
->entries
[table
->count
- 1].ecclk
;
883 pi
->vce_dpm
.soft_max_clk
= clock
;
884 pi
->vce_dpm
.hard_max_clk
= clock
;
888 static void cz_init_acp_limit(struct amdgpu_device
*adev
)
890 struct cz_power_info
*pi
= cz_get_pi(adev
);
891 struct amdgpu_clock_voltage_dependency_table
*table
=
892 &adev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
893 uint32_t clock
= 0, level
;
895 if (!table
|| !table
->count
) {
896 DRM_ERROR("Invalid Voltage Dependency table.\n");
900 pi
->acp_dpm
.soft_min_clk
= 0;
901 pi
->acp_dpm
.hard_min_clk
= 0;
902 cz_send_msg_to_smc(adev
, PPSMC_MSG_GetMaxAclkLevel
);
903 level
= cz_get_argument(adev
);
904 if (level
< table
->count
)
905 clock
= table
->entries
[level
].clk
;
907 DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
908 clock
= table
->entries
[table
->count
- 1].clk
;
911 pi
->acp_dpm
.soft_max_clk
= clock
;
912 pi
->acp_dpm
.hard_max_clk
= clock
;
916 static void cz_init_pg_state(struct amdgpu_device
*adev
)
918 struct cz_power_info
*pi
= cz_get_pi(adev
);
920 pi
->uvd_power_gated
= false;
921 pi
->vce_power_gated
= false;
922 pi
->acp_power_gated
= false;
926 static void cz_init_sclk_threshold(struct amdgpu_device
*adev
)
928 struct cz_power_info
*pi
= cz_get_pi(adev
);
930 pi
->low_sclk_interrupt_threshold
= 0;
/*
 * One-time ASIC setup: reset process tracking, push the patched pptable
 * to the SMU, then initialize every DPM limit and state default.
 * Order preserved — the limit queries depend on the uploaded table.
 */
static void cz_dpm_setup_asic(struct amdgpu_device *adev)
{
	cz_reset_ap_mask(adev);
	cz_dpm_upload_pptable_to_smu(adev);
	cz_init_sclk_limit(adev);
	cz_init_uvd_limit(adev);
	cz_init_vce_limit(adev);
	cz_init_acp_limit(adev);
	cz_init_pg_state(adev);
	cz_init_sclk_threshold(adev);
}
947 static bool cz_check_smu_feature(struct amdgpu_device
*adev
,
950 uint32_t smu_feature
= 0;
953 ret
= cz_send_msg_to_smc_with_parameter(adev
,
954 PPSMC_MSG_GetFeatureStatus
, 0);
956 DRM_ERROR("Failed to get SMU features from SMC.\n");
959 smu_feature
= cz_get_argument(adev
);
960 if (feature
& smu_feature
)
967 static bool cz_check_for_dpm_enabled(struct amdgpu_device
*adev
)
969 if (cz_check_smu_feature(adev
,
970 SMU_EnabledFeatureScoreboard_SclkDpmOn
))
976 static void cz_program_voting_clients(struct amdgpu_device
*adev
)
978 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0
, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0
);
981 static void cz_clear_voting_clients(struct amdgpu_device
*adev
)
983 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0
, 0);
986 static int cz_start_dpm(struct amdgpu_device
*adev
)
991 ret
= cz_send_msg_to_smc_with_parameter(adev
,
992 PPSMC_MSG_EnableAllSmuFeatures
, SCLK_DPM_MASK
);
994 DRM_ERROR("SMU feature: SCLK_DPM enable failed\n");
1002 static int cz_stop_dpm(struct amdgpu_device
*adev
)
1006 if (amdgpu_dpm
&& adev
->pm
.dpm_enabled
) {
1007 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1008 PPSMC_MSG_DisableAllSmuFeatures
, SCLK_DPM_MASK
);
1010 DRM_ERROR("SMU feature: SCLK_DPM disable failed\n");
1018 static uint32_t cz_get_sclk_level(struct amdgpu_device
*adev
,
1019 uint32_t clock
, uint16_t msg
)
1022 struct amdgpu_clock_voltage_dependency_table
*table
=
1023 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
1026 case PPSMC_MSG_SetSclkSoftMin
:
1027 case PPSMC_MSG_SetSclkHardMin
:
1028 for (i
= 0; i
< table
->count
; i
++)
1029 if (clock
<= table
->entries
[i
].clk
)
1031 if (i
== table
->count
)
1032 i
= table
->count
- 1;
1034 case PPSMC_MSG_SetSclkSoftMax
:
1035 case PPSMC_MSG_SetSclkHardMax
:
1036 for (i
= table
->count
- 1; i
>= 0; i
--)
1037 if (clock
>= table
->entries
[i
].clk
)
1049 static uint32_t cz_get_eclk_level(struct amdgpu_device
*adev
,
1050 uint32_t clock
, uint16_t msg
)
1053 struct amdgpu_vce_clock_voltage_dependency_table
*table
=
1054 &adev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
1056 if (table
->count
== 0)
1060 case PPSMC_MSG_SetEclkSoftMin
:
1061 case PPSMC_MSG_SetEclkHardMin
:
1062 for (i
= 0; i
< table
->count
-1; i
++)
1063 if (clock
<= table
->entries
[i
].ecclk
)
1066 case PPSMC_MSG_SetEclkSoftMax
:
1067 case PPSMC_MSG_SetEclkHardMax
:
1068 for (i
= table
->count
- 1; i
> 0; i
--)
1069 if (clock
>= table
->entries
[i
].ecclk
)
1079 static int cz_program_bootup_state(struct amdgpu_device
*adev
)
1081 struct cz_power_info
*pi
= cz_get_pi(adev
);
1082 uint32_t soft_min_clk
= 0;
1083 uint32_t soft_max_clk
= 0;
1086 pi
->sclk_dpm
.soft_min_clk
= pi
->sys_info
.bootup_sclk
;
1087 pi
->sclk_dpm
.soft_max_clk
= pi
->sys_info
.bootup_sclk
;
1089 soft_min_clk
= cz_get_sclk_level(adev
,
1090 pi
->sclk_dpm
.soft_min_clk
,
1091 PPSMC_MSG_SetSclkSoftMin
);
1092 soft_max_clk
= cz_get_sclk_level(adev
,
1093 pi
->sclk_dpm
.soft_max_clk
,
1094 PPSMC_MSG_SetSclkSoftMax
);
1096 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1097 PPSMC_MSG_SetSclkSoftMin
, soft_min_clk
);
1101 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1102 PPSMC_MSG_SetSclkSoftMax
, soft_max_clk
);
/* Stub: no cg/pg disable sequence is implemented for Carrizo yet. */
static int cz_disable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}
/* Stub: no cg/pg enable sequence is implemented for Carrizo yet. */
static int cz_enable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}
/* Stub: Di/Dt power-tune register programming not implemented yet. */
static int cz_program_pt_config_registers(struct amdgpu_device *adev)
{
	return 0;
}
1127 static void cz_do_enable_didt(struct amdgpu_device
*adev
, bool enable
)
1129 struct cz_power_info
*pi
= cz_get_pi(adev
);
1132 if (pi
->caps_sq_ramping
) {
1133 reg
= RREG32_DIDT(ixDIDT_SQ_CTRL0
);
1135 reg
= REG_SET_FIELD(reg
, DIDT_SQ_CTRL0
, DIDT_CTRL_EN
, 1);
1137 reg
= REG_SET_FIELD(reg
, DIDT_SQ_CTRL0
, DIDT_CTRL_EN
, 0);
1138 WREG32_DIDT(ixDIDT_SQ_CTRL0
, reg
);
1140 if (pi
->caps_db_ramping
) {
1141 reg
= RREG32_DIDT(ixDIDT_DB_CTRL0
);
1143 reg
= REG_SET_FIELD(reg
, DIDT_DB_CTRL0
, DIDT_CTRL_EN
, 1);
1145 reg
= REG_SET_FIELD(reg
, DIDT_DB_CTRL0
, DIDT_CTRL_EN
, 0);
1146 WREG32_DIDT(ixDIDT_DB_CTRL0
, reg
);
1148 if (pi
->caps_td_ramping
) {
1149 reg
= RREG32_DIDT(ixDIDT_TD_CTRL0
);
1151 reg
= REG_SET_FIELD(reg
, DIDT_TD_CTRL0
, DIDT_CTRL_EN
, 1);
1153 reg
= REG_SET_FIELD(reg
, DIDT_TD_CTRL0
, DIDT_CTRL_EN
, 0);
1154 WREG32_DIDT(ixDIDT_TD_CTRL0
, reg
);
1156 if (pi
->caps_tcp_ramping
) {
1157 reg
= RREG32_DIDT(ixDIDT_TCP_CTRL0
);
1159 reg
= REG_SET_FIELD(reg
, DIDT_SQ_CTRL0
, DIDT_CTRL_EN
, 1);
1161 reg
= REG_SET_FIELD(reg
, DIDT_SQ_CTRL0
, DIDT_CTRL_EN
, 0);
1162 WREG32_DIDT(ixDIDT_TCP_CTRL0
, reg
);
1167 static int cz_enable_didt(struct amdgpu_device
*adev
, bool enable
)
1169 struct cz_power_info
*pi
= cz_get_pi(adev
);
1172 if (pi
->caps_sq_ramping
|| pi
->caps_db_ramping
||
1173 pi
->caps_td_ramping
|| pi
->caps_tcp_ramping
) {
1174 if (adev
->gfx
.gfx_current_status
!= AMDGPU_GFX_SAFE_MODE
) {
1175 ret
= cz_disable_cgpg(adev
);
1177 DRM_ERROR("Pre Di/Dt disable cg/pg failed\n");
1180 adev
->gfx
.gfx_current_status
= AMDGPU_GFX_SAFE_MODE
;
1183 ret
= cz_program_pt_config_registers(adev
);
1185 DRM_ERROR("Di/Dt config failed\n");
1188 cz_do_enable_didt(adev
, enable
);
1190 if (adev
->gfx
.gfx_current_status
== AMDGPU_GFX_SAFE_MODE
) {
1191 ret
= cz_enable_cgpg(adev
);
1193 DRM_ERROR("Post Di/Dt enable cg/pg failed\n");
1196 adev
->gfx
.gfx_current_status
= AMDGPU_GFX_NORMAL_MODE
;
/* Stub: ACP boot level reset — nothing to do on this ASIC yet. */
static void cz_reset_acp_boot_level(struct amdgpu_device *adev)
{
}
1208 static void cz_update_current_ps(struct amdgpu_device
*adev
,
1209 struct amdgpu_ps
*rps
)
1211 struct cz_power_info
*pi
= cz_get_pi(adev
);
1212 struct cz_ps
*ps
= cz_get_ps(rps
);
1214 pi
->current_ps
= *ps
;
1215 pi
->current_rps
= *rps
;
1216 pi
->current_rps
.ps_priv
= ps
;
1220 static void cz_update_requested_ps(struct amdgpu_device
*adev
,
1221 struct amdgpu_ps
*rps
)
1223 struct cz_power_info
*pi
= cz_get_pi(adev
);
1224 struct cz_ps
*ps
= cz_get_ps(rps
);
1226 pi
->requested_ps
= *ps
;
1227 pi
->requested_rps
= *rps
;
1228 pi
->requested_rps
.ps_priv
= ps
;
1232 /* PP arbiter support needed TODO */
1233 static void cz_apply_state_adjust_rules(struct amdgpu_device
*adev
,
1234 struct amdgpu_ps
*new_rps
,
1235 struct amdgpu_ps
*old_rps
)
1237 struct cz_ps
*ps
= cz_get_ps(new_rps
);
1238 struct cz_power_info
*pi
= cz_get_pi(adev
);
1239 struct amdgpu_clock_and_voltage_limits
*limits
=
1240 &adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
1241 /* 10kHz memory clock */
1244 ps
->force_high
= false;
1245 ps
->need_dfs_bypass
= true;
1246 pi
->video_start
= new_rps
->dclk
|| new_rps
->vclk
||
1247 new_rps
->evclk
|| new_rps
->ecclk
;
1249 if ((new_rps
->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK
) ==
1250 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY
)
1251 pi
->battery_state
= true;
1253 pi
->battery_state
= false;
1255 if (pi
->caps_stable_power_state
)
1256 mclk
= limits
->mclk
;
1258 if (mclk
> pi
->sys_info
.nbp_memory_clock
[CZ_NUM_NBPMEMORY_CLOCK
- 1])
1259 ps
->force_high
= true;
1263 static int cz_dpm_enable(struct amdgpu_device
*adev
)
1267 /* renable will hang up SMU, so check first */
1268 if (cz_check_for_dpm_enabled(adev
))
1271 cz_program_voting_clients(adev
);
1273 ret
= cz_start_dpm(adev
);
1275 DRM_ERROR("Carrizo DPM enable failed\n");
1279 ret
= cz_program_bootup_state(adev
);
1281 DRM_ERROR("Carrizo bootup state program failed\n");
1285 ret
= cz_enable_didt(adev
, true);
1287 DRM_ERROR("Carrizo enable di/dt failed\n");
1291 cz_reset_acp_boot_level(adev
);
1293 cz_update_current_ps(adev
, adev
->pm
.dpm
.boot_ps
);
1298 static int cz_dpm_hw_init(void *handle
)
1300 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1303 mutex_lock(&adev
->pm
.mutex
);
1305 /* smu init only needs to be called at startup, not resume.
1306 * It should be in sw_init, but requires the fw info gathered
1307 * in sw_init from other IP modules.
1309 ret
= cz_smu_init(adev
);
1311 DRM_ERROR("amdgpu: smc initialization failed\n");
1312 mutex_unlock(&adev
->pm
.mutex
);
1316 /* do the actual fw loading */
1317 ret
= cz_smu_start(adev
);
1319 DRM_ERROR("amdgpu: smc start failed\n");
1320 mutex_unlock(&adev
->pm
.mutex
);
1325 adev
->pm
.dpm_enabled
= false;
1326 mutex_unlock(&adev
->pm
.mutex
);
1330 /* cz dpm setup asic */
1331 cz_dpm_setup_asic(adev
);
1334 ret
= cz_dpm_enable(adev
);
1336 adev
->pm
.dpm_enabled
= false;
1338 adev
->pm
.dpm_enabled
= true;
1340 mutex_unlock(&adev
->pm
.mutex
);
1345 static int cz_dpm_disable(struct amdgpu_device
*adev
)
1349 if (!cz_check_for_dpm_enabled(adev
))
1352 ret
= cz_enable_didt(adev
, false);
1354 DRM_ERROR("Carrizo disable di/dt failed\n");
1358 /* powerup blocks */
1359 cz_dpm_powergate_uvd(adev
, false);
1360 cz_dpm_powergate_vce(adev
, false);
1362 cz_clear_voting_clients(adev
);
1364 cz_update_current_ps(adev
, adev
->pm
.dpm
.boot_ps
);
1369 static int cz_dpm_hw_fini(void *handle
)
1372 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1374 mutex_lock(&adev
->pm
.mutex
);
1376 /* smu fini only needs to be called at teardown, not suspend.
1377 * It should be in sw_fini, but we put it here for symmetry
1382 if (adev
->pm
.dpm_enabled
) {
1383 ret
= cz_dpm_disable(adev
);
1385 adev
->pm
.dpm
.current_ps
=
1386 adev
->pm
.dpm
.requested_ps
=
1387 adev
->pm
.dpm
.boot_ps
;
1390 adev
->pm
.dpm_enabled
= false;
1392 mutex_unlock(&adev
->pm
.mutex
);
1397 static int cz_dpm_suspend(void *handle
)
1400 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1402 if (adev
->pm
.dpm_enabled
) {
1403 mutex_lock(&adev
->pm
.mutex
);
1405 ret
= cz_dpm_disable(adev
);
1407 adev
->pm
.dpm
.current_ps
=
1408 adev
->pm
.dpm
.requested_ps
=
1409 adev
->pm
.dpm
.boot_ps
;
1411 mutex_unlock(&adev
->pm
.mutex
);
1417 static int cz_dpm_resume(void *handle
)
1420 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1422 mutex_lock(&adev
->pm
.mutex
);
1424 /* do the actual fw loading */
1425 ret
= cz_smu_start(adev
);
1427 DRM_ERROR("amdgpu: smc start failed\n");
1428 mutex_unlock(&adev
->pm
.mutex
);
1433 adev
->pm
.dpm_enabled
= false;
1434 mutex_unlock(&adev
->pm
.mutex
);
1438 /* cz dpm setup asic */
1439 cz_dpm_setup_asic(adev
);
1442 ret
= cz_dpm_enable(adev
);
1444 adev
->pm
.dpm_enabled
= false;
1446 adev
->pm
.dpm_enabled
= true;
1448 mutex_unlock(&adev
->pm
.mutex
);
1449 /* upon resume, re-compute the clocks */
1450 if (adev
->pm
.dpm_enabled
)
1451 amdgpu_pm_compute_clocks(adev
);
1456 static int cz_dpm_set_clockgating_state(void *handle
,
1457 enum amd_clockgating_state state
)
1462 static int cz_dpm_set_powergating_state(void *handle
,
1463 enum amd_powergating_state state
)
1468 /* borrowed from KV, need future unify */
1469 static int cz_dpm_get_temperature(struct amdgpu_device
*adev
)
1471 int actual_temp
= 0;
1472 uint32_t temp
= RREG32_SMC(0xC0300E0C);
1475 actual_temp
= 1000 * ((temp
/ 8) - 49);
1480 static int cz_dpm_pre_set_power_state(struct amdgpu_device
*adev
)
1482 struct cz_power_info
*pi
= cz_get_pi(adev
);
1483 struct amdgpu_ps requested_ps
= *adev
->pm
.dpm
.requested_ps
;
1484 struct amdgpu_ps
*new_ps
= &requested_ps
;
1486 cz_update_requested_ps(adev
, new_ps
);
1487 cz_apply_state_adjust_rules(adev
, &pi
->requested_rps
,
1493 static int cz_dpm_update_sclk_limit(struct amdgpu_device
*adev
)
1495 struct cz_power_info
*pi
= cz_get_pi(adev
);
1496 struct amdgpu_clock_and_voltage_limits
*limits
=
1497 &adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
1498 uint32_t clock
, stable_ps_clock
= 0;
1500 clock
= pi
->sclk_dpm
.soft_min_clk
;
1502 if (pi
->caps_stable_power_state
) {
1503 stable_ps_clock
= limits
->sclk
* 75 / 100;
1504 if (clock
< stable_ps_clock
)
1505 clock
= stable_ps_clock
;
1508 if (clock
!= pi
->sclk_dpm
.soft_min_clk
) {
1509 pi
->sclk_dpm
.soft_min_clk
= clock
;
1510 cz_send_msg_to_smc_with_parameter(adev
,
1511 PPSMC_MSG_SetSclkSoftMin
,
1512 cz_get_sclk_level(adev
, clock
,
1513 PPSMC_MSG_SetSclkSoftMin
));
1516 if (pi
->caps_stable_power_state
&&
1517 pi
->sclk_dpm
.soft_max_clk
!= clock
) {
1518 pi
->sclk_dpm
.soft_max_clk
= clock
;
1519 cz_send_msg_to_smc_with_parameter(adev
,
1520 PPSMC_MSG_SetSclkSoftMax
,
1521 cz_get_sclk_level(adev
, clock
,
1522 PPSMC_MSG_SetSclkSoftMax
));
1524 cz_send_msg_to_smc_with_parameter(adev
,
1525 PPSMC_MSG_SetSclkSoftMax
,
1526 cz_get_sclk_level(adev
,
1527 pi
->sclk_dpm
.soft_max_clk
,
1528 PPSMC_MSG_SetSclkSoftMax
));
1534 static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device
*adev
)
1537 struct cz_power_info
*pi
= cz_get_pi(adev
);
1539 if (pi
->caps_sclk_ds
) {
1540 cz_send_msg_to_smc_with_parameter(adev
,
1541 PPSMC_MSG_SetMinDeepSleepSclk
,
1542 CZ_MIN_DEEP_SLEEP_SCLK
);
1548 /* ?? without dal support, is this still needed in setpowerstate list*/
1549 static int cz_dpm_set_watermark_threshold(struct amdgpu_device
*adev
)
1552 struct cz_power_info
*pi
= cz_get_pi(adev
);
1554 cz_send_msg_to_smc_with_parameter(adev
,
1555 PPSMC_MSG_SetWatermarkFrequency
,
1556 pi
->sclk_dpm
.soft_max_clk
);
1561 static int cz_dpm_enable_nbdpm(struct amdgpu_device
*adev
)
1564 struct cz_power_info
*pi
= cz_get_pi(adev
);
1566 /* also depend on dal NBPStateDisableRequired */
1567 if (pi
->nb_dpm_enabled_by_driver
&& !pi
->nb_dpm_enabled
) {
1568 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1569 PPSMC_MSG_EnableAllSmuFeatures
,
1572 DRM_ERROR("amdgpu: nb dpm enable failed\n");
1575 pi
->nb_dpm_enabled
= true;
1581 static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device
*adev
,
1585 cz_send_msg_to_smc(adev
, PPSMC_MSG_EnableLowMemoryPstate
);
1587 cz_send_msg_to_smc(adev
, PPSMC_MSG_DisableLowMemoryPstate
);
1591 static int cz_dpm_update_low_memory_pstate(struct amdgpu_device
*adev
)
1594 struct cz_power_info
*pi
= cz_get_pi(adev
);
1595 struct cz_ps
*ps
= &pi
->requested_ps
;
1597 if (pi
->sys_info
.nb_dpm_enable
) {
1599 cz_dpm_nbdpm_lm_pstate_enable(adev
, true);
1601 cz_dpm_nbdpm_lm_pstate_enable(adev
, false);
/* with dpm enabled */
/* Commit the requested power state: push all SMU limits in sequence. */
static int cz_dpm_set_power_state(struct amdgpu_device *adev)
{
	int ret = 0;

	cz_dpm_update_sclk_limit(adev);
	cz_dpm_set_deep_sleep_sclk_threshold(adev);
	cz_dpm_set_watermark_threshold(adev);
	cz_dpm_enable_nbdpm(adev);
	cz_dpm_update_low_memory_pstate(adev);

	return ret;
}
1621 static void cz_dpm_post_set_power_state(struct amdgpu_device
*adev
)
1623 struct cz_power_info
*pi
= cz_get_pi(adev
);
1624 struct amdgpu_ps
*ps
= &pi
->requested_rps
;
1626 cz_update_current_ps(adev
, ps
);
1630 static int cz_dpm_force_highest(struct amdgpu_device
*adev
)
1632 struct cz_power_info
*pi
= cz_get_pi(adev
);
1635 if (pi
->sclk_dpm
.soft_min_clk
!= pi
->sclk_dpm
.soft_max_clk
) {
1636 pi
->sclk_dpm
.soft_min_clk
=
1637 pi
->sclk_dpm
.soft_max_clk
;
1638 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1639 PPSMC_MSG_SetSclkSoftMin
,
1640 cz_get_sclk_level(adev
,
1641 pi
->sclk_dpm
.soft_min_clk
,
1642 PPSMC_MSG_SetSclkSoftMin
));
1650 static int cz_dpm_force_lowest(struct amdgpu_device
*adev
)
1652 struct cz_power_info
*pi
= cz_get_pi(adev
);
1655 if (pi
->sclk_dpm
.soft_max_clk
!= pi
->sclk_dpm
.soft_min_clk
) {
1656 pi
->sclk_dpm
.soft_max_clk
= pi
->sclk_dpm
.soft_min_clk
;
1657 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1658 PPSMC_MSG_SetSclkSoftMax
,
1659 cz_get_sclk_level(adev
,
1660 pi
->sclk_dpm
.soft_max_clk
,
1661 PPSMC_MSG_SetSclkSoftMax
));
1669 static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device
*adev
)
1671 struct cz_power_info
*pi
= cz_get_pi(adev
);
1673 if (!pi
->max_sclk_level
) {
1674 cz_send_msg_to_smc(adev
, PPSMC_MSG_GetMaxSclkLevel
);
1675 pi
->max_sclk_level
= cz_get_argument(adev
) + 1;
1678 if (pi
->max_sclk_level
> CZ_MAX_HARDWARE_POWERLEVELS
) {
1679 DRM_ERROR("Invalid max sclk level!\n");
1683 return pi
->max_sclk_level
;
1686 static int cz_dpm_unforce_dpm_levels(struct amdgpu_device
*adev
)
1688 struct cz_power_info
*pi
= cz_get_pi(adev
);
1689 struct amdgpu_clock_voltage_dependency_table
*dep_table
=
1690 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
1694 pi
->sclk_dpm
.soft_min_clk
= dep_table
->entries
[0].clk
;
1695 level
= cz_dpm_get_max_sclk_level(adev
) - 1;
1696 if (level
< dep_table
->count
)
1697 pi
->sclk_dpm
.soft_max_clk
= dep_table
->entries
[level
].clk
;
1699 pi
->sclk_dpm
.soft_max_clk
=
1700 dep_table
->entries
[dep_table
->count
- 1].clk
;
1702 /* get min/max sclk soft value
1703 * notify SMU to execute */
1704 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1705 PPSMC_MSG_SetSclkSoftMin
,
1706 cz_get_sclk_level(adev
,
1707 pi
->sclk_dpm
.soft_min_clk
,
1708 PPSMC_MSG_SetSclkSoftMin
));
1712 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1713 PPSMC_MSG_SetSclkSoftMax
,
1714 cz_get_sclk_level(adev
,
1715 pi
->sclk_dpm
.soft_max_clk
,
1716 PPSMC_MSG_SetSclkSoftMax
));
1720 DRM_DEBUG("DPM unforce state min=%d, max=%d.\n",
1721 pi
->sclk_dpm
.soft_min_clk
,
1722 pi
->sclk_dpm
.soft_max_clk
);
1727 static int cz_dpm_force_dpm_level(struct amdgpu_device
*adev
,
1728 enum amdgpu_dpm_forced_level level
)
1733 case AMDGPU_DPM_FORCED_LEVEL_HIGH
:
1734 ret
= cz_dpm_unforce_dpm_levels(adev
);
1737 ret
= cz_dpm_force_highest(adev
);
1741 case AMDGPU_DPM_FORCED_LEVEL_LOW
:
1742 ret
= cz_dpm_unforce_dpm_levels(adev
);
1745 ret
= cz_dpm_force_lowest(adev
);
1749 case AMDGPU_DPM_FORCED_LEVEL_AUTO
:
1750 ret
= cz_dpm_unforce_dpm_levels(adev
);
1758 adev
->pm
.dpm
.forced_level
= level
;
/* fix me, display configuration change lists here
 * mostly dal related*/
static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev)
{
}
1769 static uint32_t cz_dpm_get_sclk(struct amdgpu_device
*adev
, bool low
)
1771 struct cz_power_info
*pi
= cz_get_pi(adev
);
1772 struct cz_ps
*requested_state
= cz_get_ps(&pi
->requested_rps
);
1775 return requested_state
->levels
[0].sclk
;
1777 return requested_state
->levels
[requested_state
->num_levels
- 1].sclk
;
1781 static uint32_t cz_dpm_get_mclk(struct amdgpu_device
*adev
, bool low
)
1783 struct cz_power_info
*pi
= cz_get_pi(adev
);
1785 return pi
->sys_info
.bootup_uma_clk
;
1788 static int cz_enable_uvd_dpm(struct amdgpu_device
*adev
, bool enable
)
1790 struct cz_power_info
*pi
= cz_get_pi(adev
);
1793 if (enable
&& pi
->caps_uvd_dpm
) {
1794 pi
->dpm_flags
|= DPMFlags_UVD_Enabled
;
1795 DRM_DEBUG("UVD DPM Enabled.\n");
1797 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1798 PPSMC_MSG_EnableAllSmuFeatures
, UVD_DPM_MASK
);
1800 pi
->dpm_flags
&= ~DPMFlags_UVD_Enabled
;
1801 DRM_DEBUG("UVD DPM Stopped\n");
1803 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1804 PPSMC_MSG_DisableAllSmuFeatures
, UVD_DPM_MASK
);
1810 static int cz_update_uvd_dpm(struct amdgpu_device
*adev
, bool gate
)
1812 return cz_enable_uvd_dpm(adev
, !gate
);
1816 static void cz_dpm_powergate_uvd(struct amdgpu_device
*adev
, bool gate
)
1818 struct cz_power_info
*pi
= cz_get_pi(adev
);
1821 if (pi
->uvd_power_gated
== gate
)
1824 pi
->uvd_power_gated
= gate
;
1827 if (pi
->caps_uvd_pg
) {
1828 /* disable clockgating so we can properly shut down the block */
1829 ret
= amdgpu_set_clockgating_state(adev
, AMD_IP_BLOCK_TYPE_UVD
,
1830 AMD_CG_STATE_UNGATE
);
1831 /* shutdown the UVD block */
1832 ret
= amdgpu_set_powergating_state(adev
, AMD_IP_BLOCK_TYPE_UVD
,
1834 /* XXX: check for errors */
1836 cz_update_uvd_dpm(adev
, gate
);
1837 if (pi
->caps_uvd_pg
)
1838 /* power off the UVD block */
1839 cz_send_msg_to_smc(adev
, PPSMC_MSG_UVDPowerOFF
);
1841 if (pi
->caps_uvd_pg
) {
1842 /* power on the UVD block */
1843 if (pi
->uvd_dynamic_pg
)
1844 cz_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_UVDPowerON
, 1);
1846 cz_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_UVDPowerON
, 0);
1847 /* re-init the UVD block */
1848 ret
= amdgpu_set_powergating_state(adev
, AMD_IP_BLOCK_TYPE_UVD
,
1849 AMD_PG_STATE_UNGATE
);
1850 /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
1851 ret
= amdgpu_set_clockgating_state(adev
, AMD_IP_BLOCK_TYPE_UVD
,
1853 /* XXX: check for errors */
1855 cz_update_uvd_dpm(adev
, gate
);
1859 static int cz_enable_vce_dpm(struct amdgpu_device
*adev
, bool enable
)
1861 struct cz_power_info
*pi
= cz_get_pi(adev
);
1864 if (enable
&& pi
->caps_vce_dpm
) {
1865 pi
->dpm_flags
|= DPMFlags_VCE_Enabled
;
1866 DRM_DEBUG("VCE DPM Enabled.\n");
1868 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1869 PPSMC_MSG_EnableAllSmuFeatures
, VCE_DPM_MASK
);
1872 pi
->dpm_flags
&= ~DPMFlags_VCE_Enabled
;
1873 DRM_DEBUG("VCE DPM Stopped\n");
1875 ret
= cz_send_msg_to_smc_with_parameter(adev
,
1876 PPSMC_MSG_DisableAllSmuFeatures
, VCE_DPM_MASK
);
1882 static int cz_update_vce_dpm(struct amdgpu_device
*adev
)
1884 struct cz_power_info
*pi
= cz_get_pi(adev
);
1885 struct amdgpu_vce_clock_voltage_dependency_table
*table
=
1886 &adev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
1888 /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
1889 if (pi
->caps_stable_power_state
) {
1890 pi
->vce_dpm
.hard_min_clk
= table
->entries
[table
->count
-1].ecclk
;
1892 } else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
1893 pi
->vce_dpm
.hard_min_clk
= table
->entries
[0].ecclk
;
1896 cz_send_msg_to_smc_with_parameter(adev
,
1897 PPSMC_MSG_SetEclkHardMin
,
1898 cz_get_eclk_level(adev
,
1899 pi
->vce_dpm
.hard_min_clk
,
1900 PPSMC_MSG_SetEclkHardMin
));
1904 static void cz_dpm_powergate_vce(struct amdgpu_device
*adev
, bool gate
)
1906 struct cz_power_info
*pi
= cz_get_pi(adev
);
1908 if (pi
->caps_vce_pg
) {
1909 if (pi
->vce_power_gated
!= gate
) {
1911 /* disable clockgating so we can properly shut down the block */
1912 amdgpu_set_clockgating_state(adev
, AMD_IP_BLOCK_TYPE_VCE
,
1913 AMD_CG_STATE_UNGATE
);
1914 /* shutdown the VCE block */
1915 amdgpu_set_powergating_state(adev
, AMD_IP_BLOCK_TYPE_VCE
,
1918 cz_enable_vce_dpm(adev
, false);
1919 /* TODO: to figure out why vce can't be poweroff. */
1920 /* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
1921 pi
->vce_power_gated
= true;
1923 cz_send_msg_to_smc(adev
, PPSMC_MSG_VCEPowerON
);
1924 pi
->vce_power_gated
= false;
1926 /* re-init the VCE block */
1927 amdgpu_set_powergating_state(adev
, AMD_IP_BLOCK_TYPE_VCE
,
1928 AMD_PG_STATE_UNGATE
);
1929 /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
1930 amdgpu_set_clockgating_state(adev
, AMD_IP_BLOCK_TYPE_VCE
,
1933 cz_update_vce_dpm(adev
);
1934 cz_enable_vce_dpm(adev
, true);
1937 if (! pi
->vce_power_gated
) {
1938 cz_update_vce_dpm(adev
);
1941 } else { /*pi->caps_vce_pg*/
1942 cz_update_vce_dpm(adev
);
1943 cz_enable_vce_dpm(adev
, true);
1949 const struct amd_ip_funcs cz_dpm_ip_funcs
= {
1950 .early_init
= cz_dpm_early_init
,
1951 .late_init
= cz_dpm_late_init
,
1952 .sw_init
= cz_dpm_sw_init
,
1953 .sw_fini
= cz_dpm_sw_fini
,
1954 .hw_init
= cz_dpm_hw_init
,
1955 .hw_fini
= cz_dpm_hw_fini
,
1956 .suspend
= cz_dpm_suspend
,
1957 .resume
= cz_dpm_resume
,
1959 .wait_for_idle
= NULL
,
1961 .print_status
= NULL
,
1962 .set_clockgating_state
= cz_dpm_set_clockgating_state
,
1963 .set_powergating_state
= cz_dpm_set_powergating_state
,
1966 static const struct amdgpu_dpm_funcs cz_dpm_funcs
= {
1967 .get_temperature
= cz_dpm_get_temperature
,
1968 .pre_set_power_state
= cz_dpm_pre_set_power_state
,
1969 .set_power_state
= cz_dpm_set_power_state
,
1970 .post_set_power_state
= cz_dpm_post_set_power_state
,
1971 .display_configuration_changed
= cz_dpm_display_configuration_changed
,
1972 .get_sclk
= cz_dpm_get_sclk
,
1973 .get_mclk
= cz_dpm_get_mclk
,
1974 .print_power_state
= cz_dpm_print_power_state
,
1975 .debugfs_print_current_performance_level
=
1976 cz_dpm_debugfs_print_current_performance_level
,
1977 .force_performance_level
= cz_dpm_force_dpm_level
,
1978 .vblank_too_short
= NULL
,
1979 .powergate_uvd
= cz_dpm_powergate_uvd
,
1980 .powergate_vce
= cz_dpm_powergate_vce
,
1983 static void cz_dpm_set_funcs(struct amdgpu_device
*adev
)
1985 if (NULL
== adev
->pm
.funcs
)
1986 adev
->pm
.funcs
= &cz_dpm_funcs
;