/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm.h"
#include "kv_dpm.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"
#define KV_MAX_DEEPSLEEP_DIVIDER_ID     5
#define KV_MINIMUM_ENGINE_CLOCK         800
#define SMC_RAM_END                     0x40000
static const struct amd_pm_funcs kv_dpm_funcs;

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable);
static void kv_init_graphics_levels(struct amdgpu_device *adev);
static int kv_calculate_ds_divider(struct amdgpu_device *adev);
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
static void kv_enable_new_levels(struct amdgpu_device *adev);
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps);
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
static int kv_set_enabled_levels(struct amdgpu_device *adev);
static int kv_force_dpm_highest(struct amdgpu_device *adev);
static int kv_force_dpm_lowest(struct amdgpu_device *adev);
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps);
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);

static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
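
/* Voltage indices come in two encodings: the 2-bit index used by the BIOS
 * sclk/voltage dependency tables and the 7-bit VID programmed into the SMU.
 * The two helpers below translate between them, preferring the
 * vddc-on-sclk dependency table when one is present and falling back to
 * the sumo VID mapping table otherwise.
 */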
static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}
static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}
static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
{
/* This bit selects who handles display phy powergating.
 * Clear the bit to let atom handle it.
 * Set it to let the driver handle it.
 * For now we just let atom handle it.
 */
#if 0
	u32 v = RREG32(mmDOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(mmDOUT_SCRATCH3, v);
#endif
}
static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
						      struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
						      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i;
	u32 n = 0;
	u32 prev_sclk = 0;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK > prev_sclk) {
			sclk_voltage_mapping_table->entries[n].sclk_frequency =
				table[i].ulSupportedSCLK;
			sclk_voltage_mapping_table->entries[n].vid_2bit =
				table[i].usVoltageIndex;
			prev_sclk = table[i].ulSupportedSCLK;
			n++;
		}
	}

	sclk_voltage_mapping_table->num_max_dpm_entries = n;
}
static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
					     struct sumo_vid_mapping_table *vid_mapping_table,
					     ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}
/* Local CAC leakage tables: lists of { block_id, signal_id, t } entries,
 * each terminated by a { 0xffffffff } sentinel (see
 * kv_program_local_cac_table()); the per-signal entries are elided here.
 */
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};
static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
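
/* DIDT (di/dt) configuration table. Each kv_pt_config_reg entry is
 * consumed by kv_program_pt_config_registers() as
 * { offset, mask, shift, value, type }: the register at 'offset' in the
 * 'type' address space gets the bits covered by 'mask' replaced with
 * 'value' << 'shift'.
 */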
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};
static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = adev->pm.dpm.priv;

	return pi;
}
static void kv_program_local_cac_table(struct amdgpu_device *adev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
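
/* Walk a kv_pt_config_reg list until the 0xFFFFFFFF terminator.
 * KV_CONFIGREG_CACHE entries only accumulate bits into 'cache'; the next
 * non-cache entry merges the cached bits into its read-modify-write.
 */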
static int kv_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}
static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}
static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		amdgpu_gfx_rlc_enter_safe_mode(adev);

		if (enable) {
			ret = kv_program_pt_config_registers(adev, didt_config_kv);
			if (ret) {
				amdgpu_gfx_rlc_exit_safe_mode(adev);
				return ret;
			}
		}

		kv_do_enable_didt(adev, enable);

		amdgpu_gfx_rlc_exit_safe_mode(adev);
	}

	return 0;
}
static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_cac) {
		WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}
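
/* The SMU firmware header at SMU7_FIRMWARE_HEADER_LOCATION tells us where
 * the DPM table and the soft-register area live in SMC SRAM; everything
 * later uploaded with amdgpu_kv_copy_bytes_to_smc() is placed relative to
 * these two offsets.
 */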
static int kv_process_firmware_header(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}
static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
				   &pi->graphics_voltage_change_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}
static int kv_set_dpm_interval(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
				   &pi->graphics_interval,
				   sizeof(u8), pi->sram_end);

	return ret;
}
static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
				   &pi->graphics_boot_level,
				   sizeof(u8), pi->sram_end);

	return ret;
}
static void kv_program_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
}

static void kv_clear_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}
static int kv_set_divider_value(struct amdgpu_device *adev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}
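
/* 8-bit VID to voltage: a linear scale stepping down 25 units per index
 * from 6200, e.g. index 100 -> 6200 - 100 * 25 = 3700.
 */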
static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}
static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
}
static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));

	return 0;
}
static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}
static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}
static void kv_start_dpm(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	amdgpu_kv_smc_dpm_enable(adev, true);
}

static void kv_stop_dpm(struct amdgpu_device *adev)
{
	amdgpu_kv_smc_dpm_enable(adev, false);
}
static void kv_start_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}
static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
{
	return amdgpu_kv_notify_message_to_smu(adev, freeze ?
					PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}
static int kv_force_lowest_valid(struct amdgpu_device *adev)
{
	return kv_force_dpm_lowest(adev);
}

static int kv_unforce_levels(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(adev);
}
static int kv_update_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}
	return ret;
}
static int kv_program_bootup_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	}
	return 0;
}
static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
				   &pi->graphics_therm_throttle_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}
static int kv_upload_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
				   (u8 *)&pi->graphics_level,
				   sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
				   pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
				   &pi->graphics_dpm_level_count,
				   sizeof(u8), pi->sram_end);

	return ret;
}
static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}
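
/* Pick a DFS bypass divider code for clocks sitting within 200 units of a
 * supported bypass frequency (clock values in this driver appear to be in
 * 10 kHz units, so 40000 corresponds to 400 MHz and the window is 2 MHz);
 * anything else runs without bypass (code 0).
 */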
static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}
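
/* The SMU consumes its tables big-endian, hence the cpu_to_be32()/
 * cpu_to_be16() conversions while the UVD/VCE/SAMU/ACP level tables below
 * are filled in before being copied into SMC SRAM.
 */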
static int kv_populate_uvd_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].dclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
				   (u8 *)&pi->uvd_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
				   &pi->uvd_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
				   (u8 *)&pi->uvd_level,
				   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
				   pi->sram_end);

	return ret;
}
static int kv_populate_vce_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;
	u32 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].evclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
				   (u8 *)&pi->vce_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
				   (u8 *)&pi->vce_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevel),
				   (u8 *)&pi->vce_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
				   pi->sram_end);

	return ret;
}
static int kv_populate_samu_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].clk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
				   (u8 *)&pi->samu_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
				   (u8 *)&pi->samu_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevel),
				   (u8 *)&pi->samu_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
				   pi->sram_end);

	return ret;
}
static int kv_populate_acp_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
				   (u8 *)&pi->acp_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, ACPInterval),
				   (u8 *)&pi->acp_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevel),
				   (u8 *)&pi->acp_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
				   pi->sram_end);

	return ret;
}
static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}
static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}
static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->acp_boot_level = 0xff;
}
static void kv_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
	adev->pm.dpm.current_ps = &pi->current_rps;
}

static void kv_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
	adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static void kv_dpm_enable_bapm(void *handle, bool enable)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, enable);
		if (ret)
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
	}
}
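
/* DPM bring-up: read the firmware header, program the bootup level,
 * upload the graphics/UVD/VCE/SAMU/ACP tables, then switch on voltage
 * scaling, thermal throttling, ULV, DIDT and CAC, and finally hook up the
 * thermal interrupt when an internal sensor is present.
 */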
static int kv_dpm_enable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = kv_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(adev);
	kv_init_graphics_levels(adev);
	ret = kv_program_bootup_state(adev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(adev);
	ret = kv_upload_dpm_settings(adev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(adev);

	kv_initialize_hardware_cac_manager(adev);

	kv_start_am(adev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(adev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(adev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(adev);
	ret = kv_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(adev);

	ret = amdgpu_kv_smc_bapm_enable(adev, false);
	if (ret) {
		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
		return ret;
	}

	if (adev->irq.installed &&
	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
	}

	return ret;
}
static void kv_dpm_disable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	amdgpu_kv_smc_bapm_enable(adev, false);

	if (adev->asic_type == CHIP_MULLINS)
		kv_enable_nb_dpm(adev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(adev, false);
	kv_dpm_powergate_samu(adev, false);
	if (pi->caps_vce_pg) /* power on the VCE block */
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
	if (pi->caps_uvd_pg) /* power on the UVD block */
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);

	kv_enable_smc_cac(adev, false);
	kv_enable_didt(adev, false);
	kv_clear_vc(adev);
	kv_stop_dpm(adev);
	kv_enable_ulv(adev, false);
	kv_reset_am(adev);

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
}
static int kv_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
				    (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
static void kv_init_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->low_sclk_interrupt_t = 0;
}
static int kv_init_fps_limits(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsHighT),
					   (u8 *)&pi->fps_high_t,
					   sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsLowT),
					   (u8 *)&pi->fps_low_t,
					   sizeof(u16), pi->sram_end);
	}
	return ret;
}
static void kv_init_powergate_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}
static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}
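
/* When ungating, pick a UVD boot level and, unless UVD DPM can manage
 * levels itself, restrict the SMU to that single level via the enabled
 * mask before (re)enabling UVD DPM.
 */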
static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
					   (uint8_t *)&pi->uvd_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		amdgpu_kv_send_msg_to_smc_with_parameter(adev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  mask);
	}

	return kv_enable_uvd_dpm(adev, !gate);
}
static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
{
	u8 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}
static int kv_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));
		kv_enable_vce_dpm(adev, true);
	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
		kv_enable_vce_dpm(adev, false);
	}

	return 0;
}
static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(adev, !gate);
}
static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
	u8 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}
static void kv_update_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(adev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
		}
	}
}
static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(adev);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(adev, !gate);
}
static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->uvd_power_gated = gate;

	if (gate) {
		/* stop the UVD block */
		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							     AMD_PG_STATE_GATE);
		kv_update_uvd_dpm(adev, gate);
		if (pi->caps_uvd_pg)
			/* power off the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg)
			/* power on the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
		/* re-init the UVD block */
		kv_update_uvd_dpm(adev, gate);

		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							     AMD_PG_STATE_UNGATE);
	}
}
static void kv_dpm_powergate_vce(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->vce_power_gated = gate;

	if (gate) {
		/* stop the VCE block */
		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							     AMD_PG_STATE_GATE);
		kv_enable_vce_dpm(adev, false);
		if (pi->caps_vce_pg) /* power off the VCE block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
	} else {
		if (pi->caps_vce_pg) /* power on the VCE block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
		kv_enable_vce_dpm(adev, true);
		/* re-init the VCE block */
		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							     AMD_PG_STATE_UNGATE);
	}
}
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(adev, true);
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(adev, false);
	}
}
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->acp_power_gated == gate)
		return;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(adev, true);
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(adev, false);
	}
}
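
/* Clamp the lowest/highest valid DPM indices to the sclk window spanned by
 * the new power state; if the window collapses, collapse to whichever
 * endpoint is closer in clock distance.
 */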
static void kv_set_valid_clock_range(struct amdgpu_device *adev,
				     struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}
static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
					 struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
					   (pi->dpm_table_start +
					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
					   &clk_bypass_cntl,
					   sizeof(u8), pi->sram_end);
	}

	return ret;
}
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}
static int kv_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(adev);
		if (ret)
			return ret;
	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(adev);
		if (ret)
			return ret;
	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(adev);
		if (ret)
			return ret;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}
static int kv_dpm_pre_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	kv_update_requested_ps(adev, new_ps);

	kv_apply_state_adjust_rules(adev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}
static int kv_dpm_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
		if (ret) {
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_force_lowest_valid(adev);
			kv_enable_new_levels(adev);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_unforce_levels(adev);
			kv_set_enabled_levels(adev);
			kv_force_lowest_valid(adev);
			kv_unforce_levels(adev);

			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(adev);
			if (adev->asic_type == CHIP_MULLINS)
				kv_enable_nb_dpm(adev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_freeze_sclk_dpm(adev, true);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_freeze_sclk_dpm(adev, false);
			kv_set_enabled_levels(adev);
			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(adev);
			kv_update_sclk_t(adev);
			kv_enable_nb_dpm(adev, true);
		}
	}

	return 0;
}
static void kv_dpm_post_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(adev, new_ps);
}
static void kv_dpm_setup_asic(struct amdgpu_device *adev)
{
	sumo_take_smu_control(adev, true);
	kv_init_powergate_state(adev);
	kv_init_sclk_t(adev);
}
#if 0
static void kv_dpm_reset_asic(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		kv_force_lowest_valid(adev);
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_upload_dpm_settings(adev);
		kv_force_lowest_valid(adev);
		kv_unforce_levels(adev);
	} else {
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_freeze_sclk_dpm(adev, true);
		kv_upload_dpm_settings(adev);
		kv_freeze_sclk_dpm(adev, false);
		kv_set_enabled_level(adev, pi->graphics_boot_level);
	}
}
#endif
static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
						struct amdgpu_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(adev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}
static void kv_patch_voltage_values(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *samu_table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *acp_table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 acp_table->entries[i].v);
	}
}
static void kv_construct_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}
static int kv_force_dpm_highest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}
static int kv_force_dpm_lowest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

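/*
 * Pick the largest deep sleep divider ID that still keeps the divided
 * engine clock (sclk >> divider) at or above the required minimum.
 */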
static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	u32 temp;
	u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk >> i;
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}

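/*
 * Clamp and patch the requested power state: pick up the VCE clocks,
 * honor the stable-p-state sclk (75% of the max AC sclk), cap levels that
 * exceed the high voltage threshold, and derive the NB p-state hints from
 * the memory clock, display count and video activity.
 */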
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct amdgpu_clock_and_voltage_limits *max_limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	if (new_rps->vce_active) {
		new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i > 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	if (new_rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(adev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(adev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
		new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

static int kv_calculate_ds_divider(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(adev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}
	return 0;
}

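/*
 * Program the per-level NB p-state hints (GnbSlow/ForceNbPs1/UpH).
 * A high memory clock, three or more active CRTCs or video playback
 * forces the fast NB p-state.
 */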
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	bool force_high;
	struct amdgpu_clock_and_voltage_limits *max_limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}
	return 0;
}

static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

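/*
 * Populate the SMU graphics DPM levels from the BIOS vddc/sclk dependency
 * table, or from the sumo sclk/voltage mapping table when no dependency
 * table is present. Levels above the high voltage threshold are skipped.
 */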
static void kv_init_graphics_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
				break;

			kv_set_divider_value(adev, i, table->entries[i].clk);
			vid_2bit = kv_convert_vid7_to_vid2(adev,
							   &pi->sys_info.vid_mapping_table,
							   table->entries[i].v);
			kv_set_vid(adev, i, vid_2bit);
			kv_set_at(adev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(adev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
			kv_set_vid(adev, i, table->entries[i].vid_2bit);
			kv_set_at(adev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(adev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(adev, i, false);
}

static void kv_enable_new_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(adev, i, true);
	}
}

static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
{
	u32 new_mask = (1 << level);

	return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							PPSMC_MSG_SCLKDPM_SetEnabledMask,
							new_mask);
}

static int kv_set_enabled_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							PPSMC_MSG_SCLKDPM_SetEnabledMask,
							new_mask);
}

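/*
 * Write the NB p-state indices for the new power state into
 * NB_DPM_CONFIG_1. Kabini and Mullins skip this step.
 */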
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 nbdpmconfig1;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
				  NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
				  NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
				  NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
				(new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
				(new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
				(new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
		WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

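/*
 * Program the thermal interrupt trip points. The register encodes each
 * trip as (degrees C + 49), so the milli-degree limits are scaled down
 * before being written to CG_THERMAL_INT_CTRL.
 */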
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
	tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
		 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
	tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
	       ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
	WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

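/*
 * Overlay for the ATOM IntegratedSystemInfo table; only the v1.8 layout
 * (info_8) is consumed by the parser below.
 */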
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};

static int kv_parse_sys_info_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(adev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(adev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(adev,
						    &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct amdgpu_device *adev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		adev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(adev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}

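/*
 * Walk the PPLib state array and build the amdgpu_ps array: each state's
 * clock levels come from the clock info array, and its classification and
 * UVD clocks from the non-clock info array. VCE states are filled last.
 */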
static int kv_parse_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	amdgpu_add_thermal_controller(adev);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct amdgpu_ps),
				  GFP_KERNEL);
	if (!adev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;

		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(adev->pm.dpm.ps);
			return -ENOMEM;
		}
		adev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(adev,
						  &adev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	adev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
		u32 sclk;

		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		adev->pm.dpm.vce_states[i].sclk = sclk;
		adev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}

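/*
 * Allocate the kv_power_info, fill in the driver defaults, then pull the
 * platform caps, system info and power states out of the VBIOS.
 */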
static int kv_dpm_init(struct amdgpu_device *adev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = pi;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
		pi->caps_sclk_ds = true;
	else
		pi->caps_sclk_ds = false;

	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	if (amdgpu_bapm == 0)
		pi->bapm_enable = false;
	else
		pi->bapm_enable = true;
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
	pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(adev);
	if (ret)
		return ret;

	kv_patch_voltage_values(adev);
	kv_construct_boot_state(adev);

	ret = kv_parse_power_table(adev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

static void
kv_dpm_debugfs_print_current_performance_level(void *handle,
					       struct seq_file *m)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 current_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
		       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
		seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
		seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}

static void
kv_dpm_print_power_state(void *handle, void *request_ps)
{
	int i;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct kv_ps *ps = kv_get_ps(rps);
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];

		printk("\t\tpower level %d sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
	}
	amdgpu_dpm_print_ps_status(adev, rps);
}

static void kv_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		kfree(adev->pm.dpm.ps[i].ps_priv);
	}
	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	amdgpu_free_extended_power_table(adev);
}

static void kv_dpm_display_configuration_changed(void *handle)
{
}

static u32 kv_dpm_get_sclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->levels[0].sclk;
	else
		return requested_state->levels[requested_state->num_levels - 1].sclk;
}

static u32 kv_dpm_get_mclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);

	return pi->sys_info.bootup_uma_clk;
}

/* get temperature in millidegrees */
static int kv_dpm_get_temp(void *handle)
{
	u32 temp;
	int actual_temp = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	temp = RREG32_SMC(0xC0300E0C);

	if (temp)
		actual_temp = (temp / 8) - 49;
	else
		actual_temp = 0;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

static int kv_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->powerplay.pp_funcs = &kv_dpm_funcs;
	adev->powerplay.pp_handle = adev;
	kv_dpm_set_irq_funcs(adev);

	return 0;
}

static int kv_dpm_late_init(void *handle)
{
	/* powerdown unused blocks for now */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->pm.dpm_enabled)
		return 0;

	kv_dpm_powergate_acp(adev, true);
	kv_dpm_powergate_samu(adev, true);

	return 0;
}

static int kv_dpm_sw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (amdgpu_dpm == 0)
		return 0;

	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
	mutex_lock(&adev->pm.mutex);
	ret = kv_dpm_init(adev);
	if (ret)
		goto dpm_failed;
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_failed:
	kv_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");
	return ret;
}

static int kv_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	flush_work(&adev->pm.dpm.thermal.work);

	mutex_lock(&adev->pm.mutex);
	kv_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

static int kv_dpm_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	mutex_lock(&adev->pm.mutex);
	kv_dpm_setup_asic(adev);
	ret = kv_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	mutex_unlock(&adev->pm.mutex);
	amdgpu_pm_compute_clocks(adev);
	return ret;
}

static int kv_dpm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		kv_dpm_disable(adev);
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int kv_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		/* disable dpm */
		kv_dpm_disable(adev);
		/* reset the power state */
		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
		mutex_unlock(&adev->pm.mutex);
	}
	return 0;
}

static int kv_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		kv_dpm_setup_asic(adev);
		ret = kv_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}
	return 0;
}

static bool kv_dpm_is_idle(void *handle)
{
	return true;
}

static int kv_dpm_wait_for_idle(void *handle)
{
	return 0;
}

static int kv_dpm_soft_reset(void *handle)
{
	return 0;
}

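/*
 * Enable/disable the two thermal trip interrupts by toggling the
 * THERM_INTH/THERM_INTL mask bits in CG_THERMAL_INT_CTRL; type selects
 * the low-to-high or high-to-low trip.
 */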
static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *src,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
	return 0;
}

static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int kv_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int kv_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
					     const struct kv_pl *kv_cpl2)
{
	return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
		(kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
		(kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
		(kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
}

static int kv_check_state_equal(void *handle,
				void *current_ps,
				void *request_ps,
				bool *equal)
{
	struct kv_ps *kv_cps;
	struct kv_ps *kv_rps;
	int i;
	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
		return -EINVAL;

	kv_cps = kv_get_ps(cps);
	kv_rps = kv_get_ps(rps);

	if (kv_cps == NULL) {
		*equal = false;
		return 0;
	}

	if (kv_cps->num_levels != kv_rps->num_levels) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < kv_cps->num_levels; i++) {
		if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
					       &(kv_rps->levels[i]))) {
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));

	return 0;
}

static int kv_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	uint32_t sclk;
	u32 pl_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
			sclk = be32_to_cpu(
				pi->graphics_level[pl_index].SclkFrequency);
			*((uint32_t *)value) = sclk;
			*size = 4;
			return 0;
		}
		return -EINVAL;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = kv_dpm_get_temp(adev);
		*size = 4;
		return 0;
	default:
		return -EINVAL;
	}
}

static int kv_set_powergating_by_smu(void *handle,
				     uint32_t block_type, bool gate)
{
	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		kv_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		kv_dpm_powergate_vce(handle, gate);
		break;
	default:
		break;
	}
	return 0;
}

static const struct amd_ip_funcs kv_dpm_ip_funcs = {
	.name = "kv_dpm",
	.early_init = kv_dpm_early_init,
	.late_init = kv_dpm_late_init,
	.sw_init = kv_dpm_sw_init,
	.sw_fini = kv_dpm_sw_fini,
	.hw_init = kv_dpm_hw_init,
	.hw_fini = kv_dpm_hw_fini,
	.suspend = kv_dpm_suspend,
	.resume = kv_dpm_resume,
	.is_idle = kv_dpm_is_idle,
	.wait_for_idle = kv_dpm_wait_for_idle,
	.soft_reset = kv_dpm_soft_reset,
	.set_clockgating_state = kv_dpm_set_clockgating_state,
	.set_powergating_state = kv_dpm_set_powergating_state,
};

const struct amdgpu_ip_block_version kv_smu_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.funcs = &kv_dpm_ip_funcs,
};

static const struct amd_pm_funcs kv_dpm_funcs = {
	.pre_set_power_state = &kv_dpm_pre_set_power_state,
	.set_power_state = &kv_dpm_set_power_state,
	.post_set_power_state = &kv_dpm_post_set_power_state,
	.display_configuration_changed = &kv_dpm_display_configuration_changed,
	.get_sclk = &kv_dpm_get_sclk,
	.get_mclk = &kv_dpm_get_mclk,
	.print_power_state = &kv_dpm_print_power_state,
	.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &kv_dpm_force_performance_level,
	.set_powergating_by_smu = kv_set_powergating_by_smu,
	.enable_bapm = &kv_dpm_enable_bapm,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.check_state_equal = kv_check_state_equal,
	.read_sensor = &kv_dpm_read_sensor,
};

static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
	.set = kv_dpm_set_interrupt_state,
	.process = kv_dpm_process_interrupt,
};

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}