/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000
static const struct amd_pm_funcs kv_dpm_funcs;

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable);
static void kv_init_graphics_levels(struct amdgpu_device *adev);
static int kv_calculate_ds_divider(struct amdgpu_device *adev);
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
static void kv_enable_new_levels(struct amdgpu_device *adev);
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps);
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
static int kv_set_enabled_levels(struct amdgpu_device *adev);
static int kv_force_dpm_highest(struct amdgpu_device *adev);
static int kv_force_dpm_lowest(struct amdgpu_device *adev);
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps);
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);

static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
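
/*
 * Voltage indices come in three widths here: the BIOS power tables use a
 * 2-bit index, the SMU wants a 7-bit voltage ID, and a separate 8-bit
 * index encodes the voltage in 25 mV steps below 6200 mV.  The helpers
 * below translate between them.
 */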
static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}
static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}
static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
{
/* This bit selects who handles display phy powergating.
 * Clear the bit to let atom handle it.
 * Set it to let the driver handle it.
 * For now we just let atom handle it.
 */
#if 0
	u32 v = RREG32(mmDOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(mmDOUT_SCRATCH3, v);
#endif
}
static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
						      struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
						      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i;
	u32 n = 0;
	u32 prev_sclk = 0;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK > prev_sclk) {
			sclk_voltage_mapping_table->entries[n].sclk_frequency =
				table[i].ulSupportedSCLK;
			sclk_voltage_mapping_table->entries[n].vid_2bit =
				table[i].usVoltageIndex;
			prev_sclk = table[i].ulSupportedSCLK;
			n++;
		}
	}

	sclk_voltage_mapping_table->num_max_dpm_entries = n;
}
static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
					     struct sumo_vid_mapping_table *vid_mapping_table,
					     ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	/* ... */
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	/* ... */
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	/* ... */
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	/* ... */
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	/* ... */
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	/* ... */
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};
static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = adev->pm.dpm.priv;

	return pi;
}
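
/*
 * Program one local CAC block: each table row names a block id and a
 * signal count, and every (block, signal) pair gets an enable word
 * written through the block's SMC-indirect control register.  The table
 * is terminated by a block_id of 0xffffffff.
 */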
static void kv_program_local_cac_table(struct amdgpu_device *adev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
static int kv_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}
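
/*
 * DIdT (di/dt) throttling limits how fast the SQ, DB, TD and TCP blocks
 * may ramp their current draw.  Each block has its own enable bit in its
 * DIDT_*_CTRL0 register, gated here by the matching caps_*_ramping flag.
 */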
static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}
static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		amdgpu_gfx_rlc_enter_safe_mode(adev);

		if (enable) {
			ret = kv_program_pt_config_registers(adev, didt_config_kv);
			if (ret) {
				amdgpu_gfx_rlc_exit_safe_mode(adev);
				return ret;
			}
		}

		kv_do_enable_didt(adev, enable);

		amdgpu_gfx_rlc_exit_safe_mode(adev);
	}

	return 0;
}
static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_cac) {
		WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}
static int kv_process_firmware_header(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}
static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
				   &pi->graphics_voltage_change_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
				   &pi->graphics_interval,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
				   &pi->graphics_boot_level,
				   sizeof(u8), pi->sram_end);

	return ret;
}
static void kv_program_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
}

static void kv_clear_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}

static int kv_set_divider_value(struct amdgpu_device *adev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}
static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
}
static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));

	return 0;
}

static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}
static void kv_start_dpm(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	amdgpu_kv_smc_dpm_enable(adev, true);
}

static void kv_stop_dpm(struct amdgpu_device *adev)
{
	amdgpu_kv_smc_dpm_enable(adev, false);
}

static void kv_start_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			      SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			     SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
{
	return amdgpu_kv_notify_message_to_smu(adev, freeze ?
					PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct amdgpu_device *adev)
{
	return kv_force_dpm_lowest(adev);
}

static int kv_unforce_levels(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(adev);
}
static int kv_update_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}
	return ret;
}
static int kv_program_bootup_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	}
	return 0;
}
static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
				   &pi->graphics_therm_throttle_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
				   (u8 *)&pi->graphics_level,
				   sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
				   pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
				   &pi->graphics_dpm_level_count,
				   sizeof(u8), pi->sram_end);

	return ret;
}
static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}
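
/*
 * A clock within 200 units (clocks here are in 10 kHz steps, so 2 MHz)
 * of one of the known DFS bypass frequencies maps to that frequency's
 * bypass divider setting; anything else runs with bypass disabled (0).
 */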
static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			return 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			return 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			return 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			return 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			return 8;
		else
			return 0;
	} else {
		return 0;
	}
}
static int kv_populate_uvd_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].dclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
				   (u8 *)&pi->uvd_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
				   &pi->uvd_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
				   (u8 *)&pi->uvd_level,
				   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
				   pi->sram_end);

	return ret;
}
static int kv_populate_vce_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;
	u32 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].evclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
				   (u8 *)&pi->vce_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
				   (u8 *)&pi->vce_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevel),
				   (u8 *)&pi->vce_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
				   pi->sram_end);

	return ret;
}
static int kv_populate_samu_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].clk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
				   (u8 *)&pi->samu_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
				   (u8 *)&pi->samu_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevel),
				   (u8 *)&pi->samu_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}
static int kv_populate_acp_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
				   (u8 *)&pi->acp_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, ACPInterval),
				   (u8 *)&pi->acp_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevel),
				   (u8 *)&pi->acp_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}
static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}
static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->acp_boot_level = 0xff;
}
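
/*
 * The driver keeps shadow copies of both the current and the requested
 * power state.  After each struct copy the amdgpu_ps ps_priv pointer is
 * re-aimed at the embedded kv_ps so it no longer references the table
 * entry the copy was made from.
 */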
static void kv_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
	adev->pm.dpm.current_ps = &pi->current_rps;
}

static void kv_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
	adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static void kv_dpm_enable_bapm(void *handle, bool enable)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, enable);
		if (ret)
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
	}
}
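
/*
 * DPM bring-up order: read the firmware header for the SMU table
 * offsets, build and upload the graphics level table, populate the
 * UVD/VCE/SAMU/ACP tables, then turn on the individual features
 * (thermal throttling, voltage scaling, ULV, DIdT, CAC) and finally arm
 * the thermal interrupt if an internal sensor is present.
 */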
static int kv_dpm_enable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = kv_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(adev);
	kv_init_graphics_levels(adev);
	ret = kv_program_bootup_state(adev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(adev);
	ret = kv_upload_dpm_settings(adev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(adev);

	kv_initialize_hardware_cac_manager(adev);

	kv_start_am(adev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(adev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(adev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(adev);
	ret = kv_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(adev);

	ret = amdgpu_kv_smc_bapm_enable(adev, false);
	if (ret) {
		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
		return ret;
	}

	if (adev->irq.installed &&
	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
	}

	return ret;
}
static void kv_dpm_disable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	amdgpu_kv_smc_bapm_enable(adev, false);

	if (adev->asic_type == CHIP_MULLINS)
		kv_enable_nb_dpm(adev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(adev, false);
	kv_dpm_powergate_samu(adev, false);
	if (pi->caps_vce_pg) /* power on the VCE block */
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
	if (pi->caps_uvd_pg) /* power on the UVD block */
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);

	kv_enable_smc_cac(adev, false);
	kv_enable_didt(adev, false);
	kv_clear_vc(adev);
	kv_stop_dpm(adev);
	kv_enable_ulv(adev, false);
	kv_reset_am(adev);

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
}
static int kv_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
				    (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}

static void kv_init_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->low_sclk_interrupt_t = 0;
}
static int kv_init_fps_limits(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsHighT),
					   (u8 *)&pi->fps_high_t,
					   sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsLowT),
					   (u8 *)&pi->fps_low_t,
					   sizeof(u16), pi->sram_end);
	}
	return ret;
}

static void kv_init_powergate_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}
static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}
static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
					   (uint8_t *)&pi->uvd_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		amdgpu_kv_send_msg_to_smc_with_parameter(adev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  mask);
	}

	return kv_enable_uvd_dpm(adev, !gate);
}
static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
{
	u8 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}
static int kv_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));
		kv_enable_vce_dpm(adev, true);
	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
		kv_enable_vce_dpm(adev, false);
	}

	return 0;
}
static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(adev, !gate);
}
static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
	u8 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}

static void kv_update_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(adev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
		}
	}
}
static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(adev);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(adev, !gate);
}
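
/*
 * UVD/VCE/SAMU/ACP powergating all follow the same shape: when gating,
 * quiesce the block's DPM first and then ask the SMU to cut power; when
 * ungating, restore power first and re-enable DPM afterwards, so a block
 * is never clocked while unpowered.
 */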
static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->uvd_power_gated = gate;

	if (gate) {
		/* stop the UVD block */
		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							     AMD_PG_STATE_GATE);
		kv_update_uvd_dpm(adev, gate);
		if (pi->caps_uvd_pg)
			/* power off the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg)
			/* power on the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
		/* re-init the UVD block */
		kv_update_uvd_dpm(adev, gate);

		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							     AMD_PG_STATE_UNGATE);
	}
}
static void kv_dpm_powergate_vce(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->vce_power_gated = gate;

	if (gate) {
		/* stop the VCE block */
		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							     AMD_PG_STATE_GATE);
		kv_enable_vce_dpm(adev, false);
		if (pi->caps_vce_pg) /* power off the VCE block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
	} else {
		if (pi->caps_vce_pg) /* power on the VCE block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
		kv_enable_vce_dpm(adev, true);
		/* re-init the VCE block */
		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							     AMD_PG_STATE_UNGATE);
	}
}
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(adev, true);
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(adev, false);
	}
}

static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->acp_power_gated == gate)
		return;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(adev, true);
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(adev, false);
	}
}
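
/*
 * Clamp the usable graphics level window to the sclk range spanned by
 * the new power state: lowest_valid is the first level fast enough for
 * the state's lowest sclk, highest_valid the last level no faster than
 * its highest.  If the bounds cross, the one closer to the requested
 * clocks wins.
 */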
static void kv_set_valid_clock_range(struct amdgpu_device *adev,
				     struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}
static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
					 struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   (pi->dpm_table_start +
					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
					   &clk_bypass_cntl,
					   sizeof(u8), pi->sram_end);
	}

	return ret;
}
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}
static int kv_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(adev);
		if (ret)
			return ret;
	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(adev);
		if (ret)
			return ret;
	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(adev);
		if (ret)
			return ret;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}
static int kv_dpm_pre_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	kv_update_requested_ps(adev, new_ps);

	kv_apply_state_adjust_rules(adev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}
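
/*
 * The actual state switch differs per ASIC: Kabini and Mullins pin the
 * lowest valid level around the table upload and then unforce, while
 * other KV parts freeze sclk DPM for the upload instead.
 */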
static int kv_dpm_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
		if (ret) {
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_force_lowest_valid(adev);
			kv_enable_new_levels(adev);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_unforce_levels(adev);
			kv_set_enabled_levels(adev);
			kv_force_lowest_valid(adev);
			kv_unforce_levels(adev);

			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(adev);
			if (adev->asic_type == CHIP_MULLINS)
				kv_enable_nb_dpm(adev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_freeze_sclk_dpm(adev, true);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_freeze_sclk_dpm(adev, false);
			kv_set_enabled_levels(adev);
			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(adev);
			kv_update_sclk_t(adev);
			kv_enable_nb_dpm(adev, true);
		}
	}

	return 0;
}
static void kv_dpm_post_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(adev, new_ps);
}

static void kv_dpm_setup_asic(struct amdgpu_device *adev)
{
	sumo_take_smu_control(adev, true);
	kv_init_powergate_state(adev);
	kv_init_sclk_t(adev);
}
static void kv_dpm_reset_asic(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		kv_force_lowest_valid(adev);
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_upload_dpm_settings(adev);
		kv_force_lowest_valid(adev);
		kv_unforce_levels(adev);
	} else {
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_freeze_sclk_dpm(adev, true);
		kv_upload_dpm_settings(adev);
		kv_freeze_sclk_dpm(adev, false);
		kv_set_enabled_level(adev, pi->graphics_boot_level);
	}
}
static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
						struct amdgpu_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(adev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}
static void kv_patch_voltage_values(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *samu_table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *acp_table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 acp_table->entries[i].v);
	}
}
static void kv_construct_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}
2108 static int kv_force_dpm_highest(struct amdgpu_device
*adev
)
2113 ret
= amdgpu_kv_dpm_get_enable_mask(adev
, &enable_mask
);
2117 for (i
= SMU7_MAX_LEVELS_GRAPHICS
- 1; i
> 0; i
--) {
2118 if (enable_mask
& (1 << i
))
2122 if (adev
->asic_type
== CHIP_KABINI
|| adev
->asic_type
== CHIP_MULLINS
)
2123 return amdgpu_kv_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_DPM_ForceState
, i
);
2125 return kv_set_enabled_level(adev
, i
);
static int kv_force_dpm_lowest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

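/*
 * Both forcing helpers above work the same way: read back the SMC's
 * enabled-level bitmask, scan it from the top (highest) or the bottom
 * (lowest) for the first set bit, and pin the SMC to that single level.
 * E.g. an enable_mask of 0b01110 forces level 3 as "highest" and
 * level 1 as "lowest".
 */
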
static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	u32 temp;
	u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk >> i;
		if (temp >= min)
			break;
	}

	return (u8)i;
}

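/*
 * The deep-sleep divider id i encodes an engine clock of sclk >> i. The
 * loop walks from the deepest divider (KV_MAX_DEEPSLEEP_DIVIDER_ID = 5)
 * toward 1 and keeps the first one whose divided clock still meets the
 * floor: e.g. sclk = 20000 with min = 800 gives 20000 >> 5 = 625 (too
 * low) but 20000 >> 4 = 1250, so divider id 4 is returned.
 */
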
static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}

static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct amdgpu_clock_and_voltage_limits *max_limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	if (new_rps->vce_active) {
		new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i < 0) /* no entry was low enough: fall back to the lowest */
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	if (new_rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(adev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(adev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
			  new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

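/*
 * Note on the NB P-state fields chosen above: the lo/hi values are later
 * written into NB_DPM_CONFIG_1 by kv_program_nbps_index_settings().
 * force_high biases the north bridge toward the faster P-state whenever
 * higher memory bandwidth demand is implied (high mclk, three or more
 * active CRTCs, video playback, or NB PS3 disabled on battery).
 */
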
static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

static int kv_calculate_ds_divider(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(adev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}

	return 0;
}

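/*
 * SclkFrequency is stored big-endian in the SMU-visible graphics-level
 * table, hence the be32_to_cpu() above before the value is compared
 * against the CPU-endian deep-sleep clock floor.
 */
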
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	bool force_high;
	struct amdgpu_clock_and_voltage_limits *max_limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}

	return 0;
}

static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

static void kv_init_graphics_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
				break;

			kv_set_divider_value(adev, i, table->entries[i].clk);
			vid_2bit = kv_convert_vid7_to_vid2(adev,
							   &pi->sys_info.vid_mapping_table,
							   table->entries[i].v);
			kv_set_vid(adev, i, vid_2bit);
			kv_set_at(adev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(adev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
			kv_set_vid(adev, i, table->entries[i].vid_2bit);
			kv_set_at(adev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(adev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(adev, i, false);
}

static void kv_enable_new_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(adev, i, true);
	}
}

static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
{
	u32 new_mask = (1 << level);

	return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							PPSMC_MSG_SCLKDPM_SetEnabledMask,
							new_mask);
}

static int kv_set_enabled_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							PPSMC_MSG_SCLKDPM_SetEnabledMask,
							new_mask);
}

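/*
 * Example: with lowest_valid = 1 and highest_valid = 3 the loop above
 * builds new_mask = 0b1110, so the SMC may pick freely among DPM levels
 * 1-3, whereas kv_set_enabled_level() pins it to a single level.
 */
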
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 nbdpmconfig1;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
				  NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
				  NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
				  NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
				(new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
				(new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
				(new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
		WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

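/*
 * Kabini and Mullins return early above, so this register programming
 * only takes effect on the remaining (Kaveri-class) parts handled by
 * this file, and only when the system BIOS reported NB DPM as enabled.
 */
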
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
	tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
		 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
	tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
	       ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
	WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

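/*
 * Worked example: min_temp = 90000 and max_temp = 120000 millidegrees
 * clamp to low_temp = 90000 / high_temp = 120000, so DIG_THERM_INTH is
 * programmed with 49 + 120 = 169 and DIG_THERM_INTL with 49 + 90 = 139.
 * The 49-degree bias matches the offset that kv_dpm_get_temp() below
 * subtracts when converting the raw sensor reading.
 */
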
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};

static int kv_parse_sys_info_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(adev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(adev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(adev,
						    &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct amdgpu_device *adev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		adev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(adev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}

static int kv_parse_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	amdgpu_add_thermal_controller(adev);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct amdgpu_ps),
				  GFP_KERNEL);
	if (!adev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(adev->pm.dpm.ps);
			return -ENOMEM;
		}
		adev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(adev,
						  &adev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	adev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
		u32 sclk;
		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		adev->pm.dpm.vce_states[i].sclk = sclk;
		adev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}

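/*
 * The manual power_state_offset bookkeeping above relies on the v2 state
 * layout: each _ATOM_PPLIB_STATE_V2 entry is two header bytes
 * (ucNumDPMLevels and nonClockInfoIndex) followed by one clock-info
 * index byte per DPM level, hence the "2 + ucNumDPMLevels" stride.
 */
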
static int kv_dpm_init(struct amdgpu_device *adev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = pi;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
		pi->caps_sclk_ds = true;
	else
		pi->caps_sclk_ds = false;

	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	if (amdgpu_bapm == 0)
		pi->bapm_enable = false;
	else
		pi->bapm_enable = true;
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
	pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(adev);
	if (ret)
		return ret;

	kv_patch_voltage_values(adev);
	kv_construct_boot_state(adev);

	ret = kv_parse_power_table(adev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

static void
kv_dpm_debugfs_print_current_performance_level(void *handle,
					       struct seq_file *m)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 current_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
		       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
		seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
		seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}

static void
kv_dpm_print_power_state(void *handle, void *request_ps)
{
	int i;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct kv_ps *ps = kv_get_ps(rps);
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
	}
	amdgpu_dpm_print_ps_status(adev, rps);
}

static void kv_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		kfree(adev->pm.dpm.ps[i].ps_priv);
	}
	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	amdgpu_free_extended_power_table(adev);
}

static void kv_dpm_display_configuration_changed(void *handle)
{
}

static u32 kv_dpm_get_sclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->levels[0].sclk;
	else
		return requested_state->levels[requested_state->num_levels - 1].sclk;
}

static u32 kv_dpm_get_mclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);

	return pi->sys_info.bootup_uma_clk;
}

/* get temperature in millidegrees */
static int kv_dpm_get_temp(void *handle)
{
	u32 temp;
	int actual_temp = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	temp = RREG32_SMC(0xC0300E0C);

	if (temp)
		actual_temp = (temp / 8) - 49;
	else
		actual_temp = 0;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

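/*
 * Example conversion: a raw thermal sensor reading of 1200 yields
 * (1200 / 8) - 49 = 101 degrees C, reported as 101000 millidegrees.
 */
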
static int kv_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->powerplay.pp_funcs = &kv_dpm_funcs;
	adev->powerplay.pp_handle = adev;
	kv_dpm_set_irq_funcs(adev);

	return 0;
}

static int kv_dpm_late_init(void *handle)
{
	/* powerdown unused blocks for now */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->pm.dpm_enabled)
		return 0;

	kv_dpm_powergate_acp(adev, true);
	kv_dpm_powergate_samu(adev, true);

	return 0;
}

)
2996 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
2998 ret
= amdgpu_irq_add_id(adev
, AMDGPU_IRQ_CLIENTID_LEGACY
, 230,
2999 &adev
->pm
.dpm
.thermal
.irq
);
3003 ret
= amdgpu_irq_add_id(adev
, AMDGPU_IRQ_CLIENTID_LEGACY
, 231,
3004 &adev
->pm
.dpm
.thermal
.irq
);
3008 /* default to balanced state */
3009 adev
->pm
.dpm
.state
= POWER_STATE_TYPE_BALANCED
;
3010 adev
->pm
.dpm
.user_state
= POWER_STATE_TYPE_BALANCED
;
3011 adev
->pm
.dpm
.forced_level
= AMD_DPM_FORCED_LEVEL_AUTO
;
3012 adev
->pm
.default_sclk
= adev
->clock
.default_sclk
;
3013 adev
->pm
.default_mclk
= adev
->clock
.default_mclk
;
3014 adev
->pm
.current_sclk
= adev
->clock
.default_sclk
;
3015 adev
->pm
.current_mclk
= adev
->clock
.default_mclk
;
3016 adev
->pm
.int_thermal_type
= THERMAL_TYPE_NONE
;
3018 if (amdgpu_dpm
== 0)
3021 INIT_WORK(&adev
->pm
.dpm
.thermal
.work
, amdgpu_dpm_thermal_work_handler
);
3022 mutex_lock(&adev
->pm
.mutex
);
3023 ret
= kv_dpm_init(adev
);
3026 adev
->pm
.dpm
.current_ps
= adev
->pm
.dpm
.requested_ps
= adev
->pm
.dpm
.boot_ps
;
3027 if (amdgpu_dpm
== 1)
3028 amdgpu_pm_print_power_states(adev
);
3029 mutex_unlock(&adev
->pm
.mutex
);
3030 DRM_INFO("amdgpu: dpm initialized\n");
3036 mutex_unlock(&adev
->pm
.mutex
);
3037 DRM_ERROR("amdgpu: dpm initialization failed\n");
static int kv_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	flush_work(&adev->pm.dpm.thermal.work);

	mutex_lock(&adev->pm.mutex);
	kv_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

static int kv_dpm_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	mutex_lock(&adev->pm.mutex);
	kv_dpm_setup_asic(adev);
	ret = kv_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	mutex_unlock(&adev->pm.mutex);
	amdgpu_pm_compute_clocks(adev);

	return ret;
}

static int kv_dpm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		kv_dpm_disable(adev);
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int kv_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		/* disable dpm */
		kv_dpm_disable(adev);
		/* reset the power state */
		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int kv_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		kv_dpm_setup_asic(adev);
		ret = kv_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}

static bool kv_dpm_is_idle(void *handle)
{
	return true;
}

static int kv_dpm_wait_for_idle(void *handle)
{
	return 0;
}

static int kv_dpm_soft_reset(void *handle)
{
	return 0;
}

static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *src,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}

	return 0;
}

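/*
 * Despite the _MASK_MASK spelling (the register-field mask of the
 * THERM_INTH/THERM_INTL "mask" bits), setting the bit here enables the
 * corresponding thermal interrupt source and clearing it disables it,
 * matching the ENABLE/DISABLE cases above.
 */
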
static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int kv_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int kv_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
					     const struct kv_pl *kv_cpl2)
{
	return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
		(kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
		(kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
		(kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
}

static int kv_check_state_equal(void *handle,
				void *current_ps,
				void *request_ps,
				bool *equal)
{
	struct kv_ps *kv_cps;
	struct kv_ps *kv_rps;
	int i;
	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
		return -EINVAL;

	kv_cps = kv_get_ps(cps);
	kv_rps = kv_get_ps(rps);

	if (kv_cps == NULL) {
		*equal = false;
		return 0;
	}

	if (kv_cps->num_levels != kv_rps->num_levels) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < kv_cps->num_levels; i++) {
		if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
					       &(kv_rps->levels[i]))) {
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));

	return 0;
}

static int kv_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	uint32_t sclk;
	u32 pl_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
			sclk = be32_to_cpu(
				pi->graphics_level[pl_index].SclkFrequency);
			*((uint32_t *)value) = sclk;
			*size = 4;
			return 0;
		}
		return -EINVAL;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = kv_dpm_get_temp(adev);
		*size = 4;
		return 0;
	default:
		return -EINVAL;
	}
}

static int kv_set_powergating_by_smu(void *handle,
				     uint32_t block_type, bool gate)
{
	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		kv_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		kv_dpm_powergate_vce(handle, gate);
		break;
	default:
		break;
	}

	return 0;
}

static const struct amd_ip_funcs kv_dpm_ip_funcs = {
	.name = "kv_dpm",
	.early_init = kv_dpm_early_init,
	.late_init = kv_dpm_late_init,
	.sw_init = kv_dpm_sw_init,
	.sw_fini = kv_dpm_sw_fini,
	.hw_init = kv_dpm_hw_init,
	.hw_fini = kv_dpm_hw_fini,
	.suspend = kv_dpm_suspend,
	.resume = kv_dpm_resume,
	.is_idle = kv_dpm_is_idle,
	.wait_for_idle = kv_dpm_wait_for_idle,
	.soft_reset = kv_dpm_soft_reset,
	.set_clockgating_state = kv_dpm_set_clockgating_state,
	.set_powergating_state = kv_dpm_set_powergating_state,
};

const struct amdgpu_ip_block_version kv_smu_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &kv_dpm_ip_funcs,
};

static const struct amd_pm_funcs kv_dpm_funcs = {
	.pre_set_power_state = &kv_dpm_pre_set_power_state,
	.set_power_state = &kv_dpm_set_power_state,
	.post_set_power_state = &kv_dpm_post_set_power_state,
	.display_configuration_changed = &kv_dpm_display_configuration_changed,
	.get_sclk = &kv_dpm_get_sclk,
	.get_mclk = &kv_dpm_get_mclk,
	.print_power_state = &kv_dpm_print_power_state,
	.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &kv_dpm_force_performance_level,
	.set_powergating_by_smu = kv_set_powergating_by_smu,
	.enable_bapm = &kv_dpm_enable_bapm,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.check_state_equal = kv_check_state_equal,
	.read_sensor = &kv_dpm_read_sensor,
};

static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
	.set = kv_dpm_set_interrupt_state,
	.process = kv_dpm_process_interrupt,
};

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}