2 * Copyright 2013 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/pci.h>
25 #include <linux/seq_file.h>
31 #include "radeon_asic.h"
33 #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
34 #define KV_MINIMUM_ENGINE_CLOCK 800
35 #define SMC_RAM_END 0x40000
37 static int kv_enable_nb_dpm(struct radeon_device
*rdev
,
39 static void kv_init_graphics_levels(struct radeon_device
*rdev
);
40 static int kv_calculate_ds_divider(struct radeon_device
*rdev
);
41 static int kv_calculate_nbps_level_settings(struct radeon_device
*rdev
);
42 static int kv_calculate_dpm_settings(struct radeon_device
*rdev
);
43 static void kv_enable_new_levels(struct radeon_device
*rdev
);
44 static void kv_program_nbps_index_settings(struct radeon_device
*rdev
,
45 struct radeon_ps
*new_rps
);
46 static int kv_set_enabled_level(struct radeon_device
*rdev
, u32 level
);
47 static int kv_set_enabled_levels(struct radeon_device
*rdev
);
48 static int kv_force_dpm_highest(struct radeon_device
*rdev
);
49 static int kv_force_dpm_lowest(struct radeon_device
*rdev
);
50 static void kv_apply_state_adjust_rules(struct radeon_device
*rdev
,
51 struct radeon_ps
*new_rps
,
52 struct radeon_ps
*old_rps
);
53 static int kv_set_thermal_temperature_range(struct radeon_device
*rdev
,
54 int min_temp
, int max_temp
);
55 static int kv_init_fps_limits(struct radeon_device
*rdev
);
57 void kv_dpm_powergate_uvd(struct radeon_device
*rdev
, bool gate
);
58 static void kv_dpm_powergate_vce(struct radeon_device
*rdev
, bool gate
);
59 static void kv_dpm_powergate_samu(struct radeon_device
*rdev
, bool gate
);
60 static void kv_dpm_powergate_acp(struct radeon_device
*rdev
, bool gate
);
62 extern void cik_enter_rlc_safe_mode(struct radeon_device
*rdev
);
63 extern void cik_exit_rlc_safe_mode(struct radeon_device
*rdev
);
64 extern void cik_update_cg(struct radeon_device
*rdev
,
65 u32 block
, bool enable
);
67 static const struct kv_pt_config_reg didt_config_kv
[] =
69 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
70 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
71 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
72 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
73 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
74 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
75 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
76 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
77 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
78 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
79 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
80 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
81 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
82 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
83 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
84 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
85 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
86 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
87 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
88 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
89 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
90 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
91 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
92 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
93 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
94 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
95 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
96 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
97 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
98 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
99 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
100 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
101 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
102 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
103 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
104 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
105 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
106 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
107 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
108 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
109 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
110 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
111 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
112 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
113 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
114 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
115 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
116 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
117 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
118 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
119 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
120 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
121 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
122 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
123 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
124 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
125 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
126 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
127 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
128 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
129 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
130 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
131 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
132 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
133 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
134 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
135 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
136 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
137 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
138 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
139 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
140 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
144 static struct kv_ps
*kv_get_ps(struct radeon_ps
*rps
)
146 struct kv_ps
*ps
= rps
->ps_priv
;
151 static struct kv_power_info
*kv_get_pi(struct radeon_device
*rdev
)
153 struct kv_power_info
*pi
= rdev
->pm
.dpm
.priv
;
158 static int kv_program_pt_config_registers(struct radeon_device
*rdev
,
159 const struct kv_pt_config_reg
*cac_config_regs
)
161 const struct kv_pt_config_reg
*config_regs
= cac_config_regs
;
165 if (config_regs
== NULL
)
168 while (config_regs
->offset
!= 0xFFFFFFFF) {
169 if (config_regs
->type
== KV_CONFIGREG_CACHE
) {
170 cache
|= ((config_regs
->value
<< config_regs
->shift
) & config_regs
->mask
);
172 switch (config_regs
->type
) {
173 case KV_CONFIGREG_SMC_IND
:
174 data
= RREG32_SMC(config_regs
->offset
);
176 case KV_CONFIGREG_DIDT_IND
:
177 data
= RREG32_DIDT(config_regs
->offset
);
180 data
= RREG32(config_regs
->offset
<< 2);
184 data
&= ~config_regs
->mask
;
185 data
|= ((config_regs
->value
<< config_regs
->shift
) & config_regs
->mask
);
189 switch (config_regs
->type
) {
190 case KV_CONFIGREG_SMC_IND
:
191 WREG32_SMC(config_regs
->offset
, data
);
193 case KV_CONFIGREG_DIDT_IND
:
194 WREG32_DIDT(config_regs
->offset
, data
);
197 WREG32(config_regs
->offset
<< 2, data
);
207 static void kv_do_enable_didt(struct radeon_device
*rdev
, bool enable
)
209 struct kv_power_info
*pi
= kv_get_pi(rdev
);
212 if (pi
->caps_sq_ramping
) {
213 data
= RREG32_DIDT(DIDT_SQ_CTRL0
);
215 data
|= DIDT_CTRL_EN
;
217 data
&= ~DIDT_CTRL_EN
;
218 WREG32_DIDT(DIDT_SQ_CTRL0
, data
);
221 if (pi
->caps_db_ramping
) {
222 data
= RREG32_DIDT(DIDT_DB_CTRL0
);
224 data
|= DIDT_CTRL_EN
;
226 data
&= ~DIDT_CTRL_EN
;
227 WREG32_DIDT(DIDT_DB_CTRL0
, data
);
230 if (pi
->caps_td_ramping
) {
231 data
= RREG32_DIDT(DIDT_TD_CTRL0
);
233 data
|= DIDT_CTRL_EN
;
235 data
&= ~DIDT_CTRL_EN
;
236 WREG32_DIDT(DIDT_TD_CTRL0
, data
);
239 if (pi
->caps_tcp_ramping
) {
240 data
= RREG32_DIDT(DIDT_TCP_CTRL0
);
242 data
|= DIDT_CTRL_EN
;
244 data
&= ~DIDT_CTRL_EN
;
245 WREG32_DIDT(DIDT_TCP_CTRL0
, data
);
249 static int kv_enable_didt(struct radeon_device
*rdev
, bool enable
)
251 struct kv_power_info
*pi
= kv_get_pi(rdev
);
254 if (pi
->caps_sq_ramping
||
255 pi
->caps_db_ramping
||
256 pi
->caps_td_ramping
||
257 pi
->caps_tcp_ramping
) {
258 cik_enter_rlc_safe_mode(rdev
);
261 ret
= kv_program_pt_config_registers(rdev
, didt_config_kv
);
263 cik_exit_rlc_safe_mode(rdev
);
268 kv_do_enable_didt(rdev
, enable
);
270 cik_exit_rlc_safe_mode(rdev
);
276 static int kv_enable_smc_cac(struct radeon_device
*rdev
, bool enable
)
278 struct kv_power_info
*pi
= kv_get_pi(rdev
);
283 ret
= kv_notify_message_to_smu(rdev
, PPSMC_MSG_EnableCac
);
285 pi
->cac_enabled
= false;
287 pi
->cac_enabled
= true;
288 } else if (pi
->cac_enabled
) {
289 kv_notify_message_to_smu(rdev
, PPSMC_MSG_DisableCac
);
290 pi
->cac_enabled
= false;
297 static int kv_process_firmware_header(struct radeon_device
*rdev
)
299 struct kv_power_info
*pi
= kv_get_pi(rdev
);
303 ret
= kv_read_smc_sram_dword(rdev
, SMU7_FIRMWARE_HEADER_LOCATION
+
304 offsetof(SMU7_Firmware_Header
, DpmTable
),
308 pi
->dpm_table_start
= tmp
;
310 ret
= kv_read_smc_sram_dword(rdev
, SMU7_FIRMWARE_HEADER_LOCATION
+
311 offsetof(SMU7_Firmware_Header
, SoftRegisters
),
315 pi
->soft_regs_start
= tmp
;
320 static int kv_enable_dpm_voltage_scaling(struct radeon_device
*rdev
)
322 struct kv_power_info
*pi
= kv_get_pi(rdev
);
325 pi
->graphics_voltage_change_enable
= 1;
327 ret
= kv_copy_bytes_to_smc(rdev
,
328 pi
->dpm_table_start
+
329 offsetof(SMU7_Fusion_DpmTable
, GraphicsVoltageChangeEnable
),
330 &pi
->graphics_voltage_change_enable
,
331 sizeof(u8
), pi
->sram_end
);
336 static int kv_set_dpm_interval(struct radeon_device
*rdev
)
338 struct kv_power_info
*pi
= kv_get_pi(rdev
);
341 pi
->graphics_interval
= 1;
343 ret
= kv_copy_bytes_to_smc(rdev
,
344 pi
->dpm_table_start
+
345 offsetof(SMU7_Fusion_DpmTable
, GraphicsInterval
),
346 &pi
->graphics_interval
,
347 sizeof(u8
), pi
->sram_end
);
352 static int kv_set_dpm_boot_state(struct radeon_device
*rdev
)
354 struct kv_power_info
*pi
= kv_get_pi(rdev
);
357 ret
= kv_copy_bytes_to_smc(rdev
,
358 pi
->dpm_table_start
+
359 offsetof(SMU7_Fusion_DpmTable
, GraphicsBootLevel
),
360 &pi
->graphics_boot_level
,
361 sizeof(u8
), pi
->sram_end
);
366 static void kv_program_vc(struct radeon_device
*rdev
)
368 WREG32_SMC(CG_FTV_0
, 0x3FFFC100);
371 static void kv_clear_vc(struct radeon_device
*rdev
)
373 WREG32_SMC(CG_FTV_0
, 0);
376 static int kv_set_divider_value(struct radeon_device
*rdev
,
379 struct kv_power_info
*pi
= kv_get_pi(rdev
);
380 struct atom_clock_dividers dividers
;
383 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
384 sclk
, false, ÷rs
);
388 pi
->graphics_level
[index
].SclkDid
= (u8
)dividers
.post_div
;
389 pi
->graphics_level
[index
].SclkFrequency
= cpu_to_be32(sclk
);
394 static u32
kv_convert_vid2_to_vid7(struct radeon_device
*rdev
,
395 struct sumo_vid_mapping_table
*vid_mapping_table
,
398 struct radeon_clock_voltage_dependency_table
*vddc_sclk_table
=
399 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
402 if (vddc_sclk_table
&& vddc_sclk_table
->count
) {
403 if (vid_2bit
< vddc_sclk_table
->count
)
404 return vddc_sclk_table
->entries
[vid_2bit
].v
;
406 return vddc_sclk_table
->entries
[vddc_sclk_table
->count
- 1].v
;
408 for (i
= 0; i
< vid_mapping_table
->num_entries
; i
++) {
409 if (vid_mapping_table
->entries
[i
].vid_2bit
== vid_2bit
)
410 return vid_mapping_table
->entries
[i
].vid_7bit
;
412 return vid_mapping_table
->entries
[vid_mapping_table
->num_entries
- 1].vid_7bit
;
416 static u32
kv_convert_vid7_to_vid2(struct radeon_device
*rdev
,
417 struct sumo_vid_mapping_table
*vid_mapping_table
,
420 struct radeon_clock_voltage_dependency_table
*vddc_sclk_table
=
421 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
424 if (vddc_sclk_table
&& vddc_sclk_table
->count
) {
425 for (i
= 0; i
< vddc_sclk_table
->count
; i
++) {
426 if (vddc_sclk_table
->entries
[i
].v
== vid_7bit
)
429 return vddc_sclk_table
->count
- 1;
431 for (i
= 0; i
< vid_mapping_table
->num_entries
; i
++) {
432 if (vid_mapping_table
->entries
[i
].vid_7bit
== vid_7bit
)
433 return vid_mapping_table
->entries
[i
].vid_2bit
;
436 return vid_mapping_table
->entries
[vid_mapping_table
->num_entries
- 1].vid_2bit
;
440 static u16
kv_convert_8bit_index_to_voltage(struct radeon_device
*rdev
,
443 return 6200 - (voltage
* 25);
446 static u16
kv_convert_2bit_index_to_voltage(struct radeon_device
*rdev
,
449 struct kv_power_info
*pi
= kv_get_pi(rdev
);
450 u32 vid_8bit
= kv_convert_vid2_to_vid7(rdev
,
451 &pi
->sys_info
.vid_mapping_table
,
454 return kv_convert_8bit_index_to_voltage(rdev
, (u16
)vid_8bit
);
458 static int kv_set_vid(struct radeon_device
*rdev
, u32 index
, u32 vid
)
460 struct kv_power_info
*pi
= kv_get_pi(rdev
);
462 pi
->graphics_level
[index
].VoltageDownH
= (u8
)pi
->voltage_drop_t
;
463 pi
->graphics_level
[index
].MinVddNb
=
464 cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev
, vid
));
469 static int kv_set_at(struct radeon_device
*rdev
, u32 index
, u32 at
)
471 struct kv_power_info
*pi
= kv_get_pi(rdev
);
473 pi
->graphics_level
[index
].AT
= cpu_to_be16((u16
)at
);
478 static void kv_dpm_power_level_enable(struct radeon_device
*rdev
,
479 u32 index
, bool enable
)
481 struct kv_power_info
*pi
= kv_get_pi(rdev
);
483 pi
->graphics_level
[index
].EnabledForActivity
= enable
? 1 : 0;
486 static void kv_start_dpm(struct radeon_device
*rdev
)
488 u32 tmp
= RREG32_SMC(GENERAL_PWRMGT
);
490 tmp
|= GLOBAL_PWRMGT_EN
;
491 WREG32_SMC(GENERAL_PWRMGT
, tmp
);
493 kv_smc_dpm_enable(rdev
, true);
496 static void kv_stop_dpm(struct radeon_device
*rdev
)
498 kv_smc_dpm_enable(rdev
, false);
501 static void kv_start_am(struct radeon_device
*rdev
)
503 u32 sclk_pwrmgt_cntl
= RREG32_SMC(SCLK_PWRMGT_CNTL
);
505 sclk_pwrmgt_cntl
&= ~(RESET_SCLK_CNT
| RESET_BUSY_CNT
);
506 sclk_pwrmgt_cntl
|= DYNAMIC_PM_EN
;
508 WREG32_SMC(SCLK_PWRMGT_CNTL
, sclk_pwrmgt_cntl
);
511 static void kv_reset_am(struct radeon_device
*rdev
)
513 u32 sclk_pwrmgt_cntl
= RREG32_SMC(SCLK_PWRMGT_CNTL
);
515 sclk_pwrmgt_cntl
|= (RESET_SCLK_CNT
| RESET_BUSY_CNT
);
517 WREG32_SMC(SCLK_PWRMGT_CNTL
, sclk_pwrmgt_cntl
);
520 static int kv_freeze_sclk_dpm(struct radeon_device
*rdev
, bool freeze
)
522 return kv_notify_message_to_smu(rdev
, freeze
?
523 PPSMC_MSG_SCLKDPM_FreezeLevel
: PPSMC_MSG_SCLKDPM_UnfreezeLevel
);
/* Force the lowest valid DPM level (thin alias of kv_force_dpm_lowest). */
static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}
531 static int kv_unforce_levels(struct radeon_device
*rdev
)
533 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
)
534 return kv_notify_message_to_smu(rdev
, PPSMC_MSG_NoForcedLevel
);
536 return kv_set_enabled_levels(rdev
);
539 static int kv_update_sclk_t(struct radeon_device
*rdev
)
541 struct kv_power_info
*pi
= kv_get_pi(rdev
);
542 u32 low_sclk_interrupt_t
= 0;
545 if (pi
->caps_sclk_throttle_low_notification
) {
546 low_sclk_interrupt_t
= cpu_to_be32(pi
->low_sclk_interrupt_t
);
548 ret
= kv_copy_bytes_to_smc(rdev
,
549 pi
->dpm_table_start
+
550 offsetof(SMU7_Fusion_DpmTable
, LowSclkInterruptT
),
551 (u8
*)&low_sclk_interrupt_t
,
552 sizeof(u32
), pi
->sram_end
);
557 static int kv_program_bootup_state(struct radeon_device
*rdev
)
559 struct kv_power_info
*pi
= kv_get_pi(rdev
);
561 struct radeon_clock_voltage_dependency_table
*table
=
562 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
564 if (table
&& table
->count
) {
565 for (i
= pi
->graphics_dpm_level_count
- 1; i
> 0; i
--) {
566 if (table
->entries
[i
].clk
== pi
->boot_pl
.sclk
)
570 pi
->graphics_boot_level
= (u8
)i
;
571 kv_dpm_power_level_enable(rdev
, i
, true);
573 struct sumo_sclk_voltage_mapping_table
*table
=
574 &pi
->sys_info
.sclk_voltage_mapping_table
;
576 if (table
->num_max_dpm_entries
== 0)
579 for (i
= pi
->graphics_dpm_level_count
- 1; i
> 0; i
--) {
580 if (table
->entries
[i
].sclk_frequency
== pi
->boot_pl
.sclk
)
584 pi
->graphics_boot_level
= (u8
)i
;
585 kv_dpm_power_level_enable(rdev
, i
, true);
590 static int kv_enable_auto_thermal_throttling(struct radeon_device
*rdev
)
592 struct kv_power_info
*pi
= kv_get_pi(rdev
);
595 pi
->graphics_therm_throttle_enable
= 1;
597 ret
= kv_copy_bytes_to_smc(rdev
,
598 pi
->dpm_table_start
+
599 offsetof(SMU7_Fusion_DpmTable
, GraphicsThermThrottleEnable
),
600 &pi
->graphics_therm_throttle_enable
,
601 sizeof(u8
), pi
->sram_end
);
606 static int kv_upload_dpm_settings(struct radeon_device
*rdev
)
608 struct kv_power_info
*pi
= kv_get_pi(rdev
);
611 ret
= kv_copy_bytes_to_smc(rdev
,
612 pi
->dpm_table_start
+
613 offsetof(SMU7_Fusion_DpmTable
, GraphicsLevel
),
614 (u8
*)&pi
->graphics_level
,
615 sizeof(SMU7_Fusion_GraphicsLevel
) * SMU7_MAX_LEVELS_GRAPHICS
,
621 ret
= kv_copy_bytes_to_smc(rdev
,
622 pi
->dpm_table_start
+
623 offsetof(SMU7_Fusion_DpmTable
, GraphicsDpmLevelCount
),
624 &pi
->graphics_dpm_level_count
,
625 sizeof(u8
), pi
->sram_end
);
630 static u32
kv_get_clock_difference(u32 a
, u32 b
)
632 return (a
>= b
) ? a
- b
: b
- a
;
635 static u32
kv_get_clk_bypass(struct radeon_device
*rdev
, u32 clk
)
637 struct kv_power_info
*pi
= kv_get_pi(rdev
);
640 if (pi
->caps_enable_dfs_bypass
) {
641 if (kv_get_clock_difference(clk
, 40000) < 200)
643 else if (kv_get_clock_difference(clk
, 30000) < 200)
645 else if (kv_get_clock_difference(clk
, 20000) < 200)
647 else if (kv_get_clock_difference(clk
, 15000) < 200)
649 else if (kv_get_clock_difference(clk
, 10000) < 200)
660 static int kv_populate_uvd_table(struct radeon_device
*rdev
)
662 struct kv_power_info
*pi
= kv_get_pi(rdev
);
663 struct radeon_uvd_clock_voltage_dependency_table
*table
=
664 &rdev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
665 struct atom_clock_dividers dividers
;
669 if (table
== NULL
|| table
->count
== 0)
672 pi
->uvd_level_count
= 0;
673 for (i
= 0; i
< table
->count
; i
++) {
674 if (pi
->high_voltage_t
&&
675 (pi
->high_voltage_t
< table
->entries
[i
].v
))
678 pi
->uvd_level
[i
].VclkFrequency
= cpu_to_be32(table
->entries
[i
].vclk
);
679 pi
->uvd_level
[i
].DclkFrequency
= cpu_to_be32(table
->entries
[i
].dclk
);
680 pi
->uvd_level
[i
].MinVddNb
= cpu_to_be16(table
->entries
[i
].v
);
682 pi
->uvd_level
[i
].VClkBypassCntl
=
683 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].vclk
);
684 pi
->uvd_level
[i
].DClkBypassCntl
=
685 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].dclk
);
687 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
688 table
->entries
[i
].vclk
, false, ÷rs
);
691 pi
->uvd_level
[i
].VclkDivider
= (u8
)dividers
.post_div
;
693 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
694 table
->entries
[i
].dclk
, false, ÷rs
);
697 pi
->uvd_level
[i
].DclkDivider
= (u8
)dividers
.post_div
;
699 pi
->uvd_level_count
++;
702 ret
= kv_copy_bytes_to_smc(rdev
,
703 pi
->dpm_table_start
+
704 offsetof(SMU7_Fusion_DpmTable
, UvdLevelCount
),
705 (u8
*)&pi
->uvd_level_count
,
706 sizeof(u8
), pi
->sram_end
);
710 pi
->uvd_interval
= 1;
712 ret
= kv_copy_bytes_to_smc(rdev
,
713 pi
->dpm_table_start
+
714 offsetof(SMU7_Fusion_DpmTable
, UVDInterval
),
716 sizeof(u8
), pi
->sram_end
);
720 ret
= kv_copy_bytes_to_smc(rdev
,
721 pi
->dpm_table_start
+
722 offsetof(SMU7_Fusion_DpmTable
, UvdLevel
),
723 (u8
*)&pi
->uvd_level
,
724 sizeof(SMU7_Fusion_UvdLevel
) * SMU7_MAX_LEVELS_UVD
,
731 static int kv_populate_vce_table(struct radeon_device
*rdev
)
733 struct kv_power_info
*pi
= kv_get_pi(rdev
);
736 struct radeon_vce_clock_voltage_dependency_table
*table
=
737 &rdev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
738 struct atom_clock_dividers dividers
;
740 if (table
== NULL
|| table
->count
== 0)
743 pi
->vce_level_count
= 0;
744 for (i
= 0; i
< table
->count
; i
++) {
745 if (pi
->high_voltage_t
&&
746 pi
->high_voltage_t
< table
->entries
[i
].v
)
749 pi
->vce_level
[i
].Frequency
= cpu_to_be32(table
->entries
[i
].evclk
);
750 pi
->vce_level
[i
].MinVoltage
= cpu_to_be16(table
->entries
[i
].v
);
752 pi
->vce_level
[i
].ClkBypassCntl
=
753 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].evclk
);
755 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
756 table
->entries
[i
].evclk
, false, ÷rs
);
759 pi
->vce_level
[i
].Divider
= (u8
)dividers
.post_div
;
761 pi
->vce_level_count
++;
764 ret
= kv_copy_bytes_to_smc(rdev
,
765 pi
->dpm_table_start
+
766 offsetof(SMU7_Fusion_DpmTable
, VceLevelCount
),
767 (u8
*)&pi
->vce_level_count
,
773 pi
->vce_interval
= 1;
775 ret
= kv_copy_bytes_to_smc(rdev
,
776 pi
->dpm_table_start
+
777 offsetof(SMU7_Fusion_DpmTable
, VCEInterval
),
778 (u8
*)&pi
->vce_interval
,
784 ret
= kv_copy_bytes_to_smc(rdev
,
785 pi
->dpm_table_start
+
786 offsetof(SMU7_Fusion_DpmTable
, VceLevel
),
787 (u8
*)&pi
->vce_level
,
788 sizeof(SMU7_Fusion_ExtClkLevel
) * SMU7_MAX_LEVELS_VCE
,
794 static int kv_populate_samu_table(struct radeon_device
*rdev
)
796 struct kv_power_info
*pi
= kv_get_pi(rdev
);
797 struct radeon_clock_voltage_dependency_table
*table
=
798 &rdev
->pm
.dpm
.dyn_state
.samu_clock_voltage_dependency_table
;
799 struct atom_clock_dividers dividers
;
803 if (table
== NULL
|| table
->count
== 0)
806 pi
->samu_level_count
= 0;
807 for (i
= 0; i
< table
->count
; i
++) {
808 if (pi
->high_voltage_t
&&
809 pi
->high_voltage_t
< table
->entries
[i
].v
)
812 pi
->samu_level
[i
].Frequency
= cpu_to_be32(table
->entries
[i
].clk
);
813 pi
->samu_level
[i
].MinVoltage
= cpu_to_be16(table
->entries
[i
].v
);
815 pi
->samu_level
[i
].ClkBypassCntl
=
816 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].clk
);
818 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
819 table
->entries
[i
].clk
, false, ÷rs
);
822 pi
->samu_level
[i
].Divider
= (u8
)dividers
.post_div
;
824 pi
->samu_level_count
++;
827 ret
= kv_copy_bytes_to_smc(rdev
,
828 pi
->dpm_table_start
+
829 offsetof(SMU7_Fusion_DpmTable
, SamuLevelCount
),
830 (u8
*)&pi
->samu_level_count
,
836 pi
->samu_interval
= 1;
838 ret
= kv_copy_bytes_to_smc(rdev
,
839 pi
->dpm_table_start
+
840 offsetof(SMU7_Fusion_DpmTable
, SAMUInterval
),
841 (u8
*)&pi
->samu_interval
,
847 ret
= kv_copy_bytes_to_smc(rdev
,
848 pi
->dpm_table_start
+
849 offsetof(SMU7_Fusion_DpmTable
, SamuLevel
),
850 (u8
*)&pi
->samu_level
,
851 sizeof(SMU7_Fusion_ExtClkLevel
) * SMU7_MAX_LEVELS_SAMU
,
860 static int kv_populate_acp_table(struct radeon_device
*rdev
)
862 struct kv_power_info
*pi
= kv_get_pi(rdev
);
863 struct radeon_clock_voltage_dependency_table
*table
=
864 &rdev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
865 struct atom_clock_dividers dividers
;
869 if (table
== NULL
|| table
->count
== 0)
872 pi
->acp_level_count
= 0;
873 for (i
= 0; i
< table
->count
; i
++) {
874 pi
->acp_level
[i
].Frequency
= cpu_to_be32(table
->entries
[i
].clk
);
875 pi
->acp_level
[i
].MinVoltage
= cpu_to_be16(table
->entries
[i
].v
);
877 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
878 table
->entries
[i
].clk
, false, ÷rs
);
881 pi
->acp_level
[i
].Divider
= (u8
)dividers
.post_div
;
883 pi
->acp_level_count
++;
886 ret
= kv_copy_bytes_to_smc(rdev
,
887 pi
->dpm_table_start
+
888 offsetof(SMU7_Fusion_DpmTable
, AcpLevelCount
),
889 (u8
*)&pi
->acp_level_count
,
895 pi
->acp_interval
= 1;
897 ret
= kv_copy_bytes_to_smc(rdev
,
898 pi
->dpm_table_start
+
899 offsetof(SMU7_Fusion_DpmTable
, ACPInterval
),
900 (u8
*)&pi
->acp_interval
,
906 ret
= kv_copy_bytes_to_smc(rdev
,
907 pi
->dpm_table_start
+
908 offsetof(SMU7_Fusion_DpmTable
, AcpLevel
),
909 (u8
*)&pi
->acp_level
,
910 sizeof(SMU7_Fusion_ExtClkLevel
) * SMU7_MAX_LEVELS_ACP
,
918 static void kv_calculate_dfs_bypass_settings(struct radeon_device
*rdev
)
920 struct kv_power_info
*pi
= kv_get_pi(rdev
);
922 struct radeon_clock_voltage_dependency_table
*table
=
923 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
925 if (table
&& table
->count
) {
926 for (i
= 0; i
< pi
->graphics_dpm_level_count
; i
++) {
927 if (pi
->caps_enable_dfs_bypass
) {
928 if (kv_get_clock_difference(table
->entries
[i
].clk
, 40000) < 200)
929 pi
->graphics_level
[i
].ClkBypassCntl
= 3;
930 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 30000) < 200)
931 pi
->graphics_level
[i
].ClkBypassCntl
= 2;
932 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 26600) < 200)
933 pi
->graphics_level
[i
].ClkBypassCntl
= 7;
934 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 20000) < 200)
935 pi
->graphics_level
[i
].ClkBypassCntl
= 6;
936 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 10000) < 200)
937 pi
->graphics_level
[i
].ClkBypassCntl
= 8;
939 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
941 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
945 struct sumo_sclk_voltage_mapping_table
*table
=
946 &pi
->sys_info
.sclk_voltage_mapping_table
;
947 for (i
= 0; i
< pi
->graphics_dpm_level_count
; i
++) {
948 if (pi
->caps_enable_dfs_bypass
) {
949 if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 40000) < 200)
950 pi
->graphics_level
[i
].ClkBypassCntl
= 3;
951 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 30000) < 200)
952 pi
->graphics_level
[i
].ClkBypassCntl
= 2;
953 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 26600) < 200)
954 pi
->graphics_level
[i
].ClkBypassCntl
= 7;
955 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 20000) < 200)
956 pi
->graphics_level
[i
].ClkBypassCntl
= 6;
957 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 10000) < 200)
958 pi
->graphics_level
[i
].ClkBypassCntl
= 8;
960 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
962 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
968 static int kv_enable_ulv(struct radeon_device
*rdev
, bool enable
)
970 return kv_notify_message_to_smu(rdev
, enable
?
971 PPSMC_MSG_EnableULV
: PPSMC_MSG_DisableULV
);
974 static void kv_reset_acp_boot_level(struct radeon_device
*rdev
)
976 struct kv_power_info
*pi
= kv_get_pi(rdev
);
978 pi
->acp_boot_level
= 0xff;
981 static void kv_update_current_ps(struct radeon_device
*rdev
,
982 struct radeon_ps
*rps
)
984 struct kv_ps
*new_ps
= kv_get_ps(rps
);
985 struct kv_power_info
*pi
= kv_get_pi(rdev
);
987 pi
->current_rps
= *rps
;
988 pi
->current_ps
= *new_ps
;
989 pi
->current_rps
.ps_priv
= &pi
->current_ps
;
992 static void kv_update_requested_ps(struct radeon_device
*rdev
,
993 struct radeon_ps
*rps
)
995 struct kv_ps
*new_ps
= kv_get_ps(rps
);
996 struct kv_power_info
*pi
= kv_get_pi(rdev
);
998 pi
->requested_rps
= *rps
;
999 pi
->requested_ps
= *new_ps
;
1000 pi
->requested_rps
.ps_priv
= &pi
->requested_ps
;
1003 void kv_dpm_enable_bapm(struct radeon_device
*rdev
, bool enable
)
1005 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1008 if (pi
->bapm_enable
) {
1009 ret
= kv_smc_bapm_enable(rdev
, enable
);
1011 DRM_ERROR("kv_smc_bapm_enable failed\n");
1015 static void kv_enable_thermal_int(struct radeon_device
*rdev
, bool enable
)
1019 thermal_int
= RREG32_SMC(CG_THERMAL_INT_CTRL
);
1021 thermal_int
|= THERM_INTH_MASK
| THERM_INTL_MASK
;
1023 thermal_int
&= ~(THERM_INTH_MASK
| THERM_INTL_MASK
);
1024 WREG32_SMC(CG_THERMAL_INT_CTRL
, thermal_int
);
1028 int kv_dpm_enable(struct radeon_device
*rdev
)
1030 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1033 ret
= kv_process_firmware_header(rdev
);
1035 DRM_ERROR("kv_process_firmware_header failed\n");
1038 kv_init_fps_limits(rdev
);
1039 kv_init_graphics_levels(rdev
);
1040 ret
= kv_program_bootup_state(rdev
);
1042 DRM_ERROR("kv_program_bootup_state failed\n");
1045 kv_calculate_dfs_bypass_settings(rdev
);
1046 ret
= kv_upload_dpm_settings(rdev
);
1048 DRM_ERROR("kv_upload_dpm_settings failed\n");
1051 ret
= kv_populate_uvd_table(rdev
);
1053 DRM_ERROR("kv_populate_uvd_table failed\n");
1056 ret
= kv_populate_vce_table(rdev
);
1058 DRM_ERROR("kv_populate_vce_table failed\n");
1061 ret
= kv_populate_samu_table(rdev
);
1063 DRM_ERROR("kv_populate_samu_table failed\n");
1066 ret
= kv_populate_acp_table(rdev
);
1068 DRM_ERROR("kv_populate_acp_table failed\n");
1071 kv_program_vc(rdev
);
1074 if (pi
->enable_auto_thermal_throttling
) {
1075 ret
= kv_enable_auto_thermal_throttling(rdev
);
1077 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
1081 ret
= kv_enable_dpm_voltage_scaling(rdev
);
1083 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
1086 ret
= kv_set_dpm_interval(rdev
);
1088 DRM_ERROR("kv_set_dpm_interval failed\n");
1091 ret
= kv_set_dpm_boot_state(rdev
);
1093 DRM_ERROR("kv_set_dpm_boot_state failed\n");
1096 ret
= kv_enable_ulv(rdev
, true);
1098 DRM_ERROR("kv_enable_ulv failed\n");
1102 ret
= kv_enable_didt(rdev
, true);
1104 DRM_ERROR("kv_enable_didt failed\n");
1107 ret
= kv_enable_smc_cac(rdev
, true);
1109 DRM_ERROR("kv_enable_smc_cac failed\n");
1113 kv_reset_acp_boot_level(rdev
);
1115 ret
= kv_smc_bapm_enable(rdev
, false);
1117 DRM_ERROR("kv_smc_bapm_enable failed\n");
1121 kv_update_current_ps(rdev
, rdev
->pm
.dpm
.boot_ps
);
1126 int kv_dpm_late_enable(struct radeon_device
*rdev
)
1130 if (rdev
->irq
.installed
&&
1131 r600_is_internal_thermal_sensor(rdev
->pm
.int_thermal_type
)) {
1132 ret
= kv_set_thermal_temperature_range(rdev
, R600_TEMP_RANGE_MIN
, R600_TEMP_RANGE_MAX
);
1134 DRM_ERROR("kv_set_thermal_temperature_range failed\n");
1137 kv_enable_thermal_int(rdev
, true);
1140 /* powerdown unused blocks for now */
1141 kv_dpm_powergate_acp(rdev
, true);
1142 kv_dpm_powergate_samu(rdev
, true);
1143 kv_dpm_powergate_vce(rdev
, true);
1144 kv_dpm_powergate_uvd(rdev
, true);
1149 void kv_dpm_disable(struct radeon_device
*rdev
)
1151 kv_smc_bapm_enable(rdev
, false);
1153 if (rdev
->family
== CHIP_MULLINS
)
1154 kv_enable_nb_dpm(rdev
, false);
1156 /* powerup blocks */
1157 kv_dpm_powergate_acp(rdev
, false);
1158 kv_dpm_powergate_samu(rdev
, false);
1159 kv_dpm_powergate_vce(rdev
, false);
1160 kv_dpm_powergate_uvd(rdev
, false);
1162 kv_enable_smc_cac(rdev
, false);
1163 kv_enable_didt(rdev
, false);
1166 kv_enable_ulv(rdev
, false);
1168 kv_enable_thermal_int(rdev
, false);
1170 kv_update_current_ps(rdev
, rdev
->pm
.dpm
.boot_ps
);
1173 static void kv_init_sclk_t(struct radeon_device
*rdev
)
1175 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1177 pi
->low_sclk_interrupt_t
= 0;
1180 static int kv_init_fps_limits(struct radeon_device
*rdev
)
1182 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1189 pi
->fps_high_t
= cpu_to_be16(tmp
);
1190 ret
= kv_copy_bytes_to_smc(rdev
,
1191 pi
->dpm_table_start
+
1192 offsetof(SMU7_Fusion_DpmTable
, FpsHighT
),
1193 (u8
*)&pi
->fps_high_t
,
1194 sizeof(u16
), pi
->sram_end
);
1197 pi
->fps_low_t
= cpu_to_be16(tmp
);
1199 ret
= kv_copy_bytes_to_smc(rdev
,
1200 pi
->dpm_table_start
+
1201 offsetof(SMU7_Fusion_DpmTable
, FpsLowT
),
1202 (u8
*)&pi
->fps_low_t
,
1203 sizeof(u16
), pi
->sram_end
);
1209 static void kv_init_powergate_state(struct radeon_device
*rdev
)
1211 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1213 pi
->uvd_power_gated
= false;
1214 pi
->vce_power_gated
= false;
1215 pi
->samu_power_gated
= false;
1216 pi
->acp_power_gated
= false;
1220 static int kv_enable_uvd_dpm(struct radeon_device
*rdev
, bool enable
)
1222 return kv_notify_message_to_smu(rdev
, enable
?
1223 PPSMC_MSG_UVDDPM_Enable
: PPSMC_MSG_UVDDPM_Disable
);
1226 static int kv_enable_vce_dpm(struct radeon_device
*rdev
, bool enable
)
1228 return kv_notify_message_to_smu(rdev
, enable
?
1229 PPSMC_MSG_VCEDPM_Enable
: PPSMC_MSG_VCEDPM_Disable
);
1232 static int kv_enable_samu_dpm(struct radeon_device
*rdev
, bool enable
)
1234 return kv_notify_message_to_smu(rdev
, enable
?
1235 PPSMC_MSG_SAMUDPM_Enable
: PPSMC_MSG_SAMUDPM_Disable
);
1238 static int kv_enable_acp_dpm(struct radeon_device
*rdev
, bool enable
)
1240 return kv_notify_message_to_smu(rdev
, enable
?
1241 PPSMC_MSG_ACPDPM_Enable
: PPSMC_MSG_ACPDPM_Disable
);
1244 static int kv_update_uvd_dpm(struct radeon_device
*rdev
, bool gate
)
1246 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1247 struct radeon_uvd_clock_voltage_dependency_table
*table
=
1248 &rdev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
1254 pi
->uvd_boot_level
= table
->count
- 1;
1256 pi
->uvd_boot_level
= 0;
1258 if (!pi
->caps_uvd_dpm
|| pi
->caps_stable_p_state
) {
1259 mask
= 1 << pi
->uvd_boot_level
;
1264 ret
= kv_copy_bytes_to_smc(rdev
,
1265 pi
->dpm_table_start
+
1266 offsetof(SMU7_Fusion_DpmTable
, UvdBootLevel
),
1267 (uint8_t *)&pi
->uvd_boot_level
,
1268 sizeof(u8
), pi
->sram_end
);
1272 kv_send_msg_to_smc_with_parameter(rdev
,
1273 PPSMC_MSG_UVDDPM_SetEnabledMask
,
1277 return kv_enable_uvd_dpm(rdev
, !gate
);
1280 static u8
kv_get_vce_boot_level(struct radeon_device
*rdev
, u32 evclk
)
1283 struct radeon_vce_clock_voltage_dependency_table
*table
=
1284 &rdev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
1286 for (i
= 0; i
< table
->count
; i
++) {
1287 if (table
->entries
[i
].evclk
>= evclk
)
1294 static int kv_update_vce_dpm(struct radeon_device
*rdev
,
1295 struct radeon_ps
*radeon_new_state
,
1296 struct radeon_ps
*radeon_current_state
)
1298 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1299 struct radeon_vce_clock_voltage_dependency_table
*table
=
1300 &rdev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
1303 if (radeon_new_state
->evclk
> 0 && radeon_current_state
->evclk
== 0) {
1304 kv_dpm_powergate_vce(rdev
, false);
1305 /* turn the clocks on when encoding */
1306 cik_update_cg(rdev
, RADEON_CG_BLOCK_VCE
, false);
1307 if (pi
->caps_stable_p_state
)
1308 pi
->vce_boot_level
= table
->count
- 1;
1310 pi
->vce_boot_level
= kv_get_vce_boot_level(rdev
, radeon_new_state
->evclk
);
1312 ret
= kv_copy_bytes_to_smc(rdev
,
1313 pi
->dpm_table_start
+
1314 offsetof(SMU7_Fusion_DpmTable
, VceBootLevel
),
1315 (u8
*)&pi
->vce_boot_level
,
1321 if (pi
->caps_stable_p_state
)
1322 kv_send_msg_to_smc_with_parameter(rdev
,
1323 PPSMC_MSG_VCEDPM_SetEnabledMask
,
1324 (1 << pi
->vce_boot_level
));
1326 kv_enable_vce_dpm(rdev
, true);
1327 } else if (radeon_new_state
->evclk
== 0 && radeon_current_state
->evclk
> 0) {
1328 kv_enable_vce_dpm(rdev
, false);
1329 /* turn the clocks off when not encoding */
1330 cik_update_cg(rdev
, RADEON_CG_BLOCK_VCE
, true);
1331 kv_dpm_powergate_vce(rdev
, true);
1337 static int kv_update_samu_dpm(struct radeon_device
*rdev
, bool gate
)
1339 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1340 struct radeon_clock_voltage_dependency_table
*table
=
1341 &rdev
->pm
.dpm
.dyn_state
.samu_clock_voltage_dependency_table
;
1345 if (pi
->caps_stable_p_state
)
1346 pi
->samu_boot_level
= table
->count
- 1;
1348 pi
->samu_boot_level
= 0;
1350 ret
= kv_copy_bytes_to_smc(rdev
,
1351 pi
->dpm_table_start
+
1352 offsetof(SMU7_Fusion_DpmTable
, SamuBootLevel
),
1353 (u8
*)&pi
->samu_boot_level
,
1359 if (pi
->caps_stable_p_state
)
1360 kv_send_msg_to_smc_with_parameter(rdev
,
1361 PPSMC_MSG_SAMUDPM_SetEnabledMask
,
1362 (1 << pi
->samu_boot_level
));
1365 return kv_enable_samu_dpm(rdev
, !gate
);
1368 static u8
kv_get_acp_boot_level(struct radeon_device
*rdev
)
1371 struct radeon_clock_voltage_dependency_table
*table
=
1372 &rdev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
1374 for (i
= 0; i
< table
->count
; i
++) {
1375 if (table
->entries
[i
].clk
>= 0) /* XXX */
1379 if (i
>= table
->count
)
1380 i
= table
->count
- 1;
1385 static void kv_update_acp_boot_level(struct radeon_device
*rdev
)
1387 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1390 if (!pi
->caps_stable_p_state
) {
1391 acp_boot_level
= kv_get_acp_boot_level(rdev
);
1392 if (acp_boot_level
!= pi
->acp_boot_level
) {
1393 pi
->acp_boot_level
= acp_boot_level
;
1394 kv_send_msg_to_smc_with_parameter(rdev
,
1395 PPSMC_MSG_ACPDPM_SetEnabledMask
,
1396 (1 << pi
->acp_boot_level
));
1401 static int kv_update_acp_dpm(struct radeon_device
*rdev
, bool gate
)
1403 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1404 struct radeon_clock_voltage_dependency_table
*table
=
1405 &rdev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
1409 if (pi
->caps_stable_p_state
)
1410 pi
->acp_boot_level
= table
->count
- 1;
1412 pi
->acp_boot_level
= kv_get_acp_boot_level(rdev
);
1414 ret
= kv_copy_bytes_to_smc(rdev
,
1415 pi
->dpm_table_start
+
1416 offsetof(SMU7_Fusion_DpmTable
, AcpBootLevel
),
1417 (u8
*)&pi
->acp_boot_level
,
1423 if (pi
->caps_stable_p_state
)
1424 kv_send_msg_to_smc_with_parameter(rdev
,
1425 PPSMC_MSG_ACPDPM_SetEnabledMask
,
1426 (1 << pi
->acp_boot_level
));
1429 return kv_enable_acp_dpm(rdev
, !gate
);
1432 void kv_dpm_powergate_uvd(struct radeon_device
*rdev
, bool gate
)
1434 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1436 if (pi
->uvd_power_gated
== gate
)
1439 pi
->uvd_power_gated
= gate
;
1442 if (pi
->caps_uvd_pg
) {
1443 uvd_v1_0_stop(rdev
);
1444 cik_update_cg(rdev
, RADEON_CG_BLOCK_UVD
, false);
1446 kv_update_uvd_dpm(rdev
, gate
);
1447 if (pi
->caps_uvd_pg
)
1448 kv_notify_message_to_smu(rdev
, PPSMC_MSG_UVDPowerOFF
);
1450 if (pi
->caps_uvd_pg
) {
1451 kv_notify_message_to_smu(rdev
, PPSMC_MSG_UVDPowerON
);
1452 uvd_v4_2_resume(rdev
);
1453 uvd_v1_0_start(rdev
);
1454 cik_update_cg(rdev
, RADEON_CG_BLOCK_UVD
, true);
1456 kv_update_uvd_dpm(rdev
, gate
);
1460 static void kv_dpm_powergate_vce(struct radeon_device
*rdev
, bool gate
)
1462 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1464 if (pi
->vce_power_gated
== gate
)
1467 pi
->vce_power_gated
= gate
;
1470 if (pi
->caps_vce_pg
) {
1471 /* XXX do we need a vce_v1_0_stop() ? */
1472 kv_notify_message_to_smu(rdev
, PPSMC_MSG_VCEPowerOFF
);
1475 if (pi
->caps_vce_pg
) {
1476 kv_notify_message_to_smu(rdev
, PPSMC_MSG_VCEPowerON
);
1477 vce_v2_0_resume(rdev
);
1478 vce_v1_0_start(rdev
);
1483 static void kv_dpm_powergate_samu(struct radeon_device
*rdev
, bool gate
)
1485 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1487 if (pi
->samu_power_gated
== gate
)
1490 pi
->samu_power_gated
= gate
;
1493 kv_update_samu_dpm(rdev
, true);
1494 if (pi
->caps_samu_pg
)
1495 kv_notify_message_to_smu(rdev
, PPSMC_MSG_SAMPowerOFF
);
1497 if (pi
->caps_samu_pg
)
1498 kv_notify_message_to_smu(rdev
, PPSMC_MSG_SAMPowerON
);
1499 kv_update_samu_dpm(rdev
, false);
1503 static void kv_dpm_powergate_acp(struct radeon_device
*rdev
, bool gate
)
1505 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1507 if (pi
->acp_power_gated
== gate
)
1510 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
)
1513 pi
->acp_power_gated
= gate
;
1516 kv_update_acp_dpm(rdev
, true);
1517 if (pi
->caps_acp_pg
)
1518 kv_notify_message_to_smu(rdev
, PPSMC_MSG_ACPPowerOFF
);
1520 if (pi
->caps_acp_pg
)
1521 kv_notify_message_to_smu(rdev
, PPSMC_MSG_ACPPowerON
);
1522 kv_update_acp_dpm(rdev
, false);
1526 static void kv_set_valid_clock_range(struct radeon_device
*rdev
,
1527 struct radeon_ps
*new_rps
)
1529 struct kv_ps
*new_ps
= kv_get_ps(new_rps
);
1530 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1532 struct radeon_clock_voltage_dependency_table
*table
=
1533 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
1535 if (table
&& table
->count
) {
1536 for (i
= 0; i
< pi
->graphics_dpm_level_count
; i
++) {
1537 if ((table
->entries
[i
].clk
>= new_ps
->levels
[0].sclk
) ||
1538 (i
== (pi
->graphics_dpm_level_count
- 1))) {
1539 pi
->lowest_valid
= i
;
1544 for (i
= pi
->graphics_dpm_level_count
- 1; i
> 0; i
--) {
1545 if (table
->entries
[i
].clk
<= new_ps
->levels
[new_ps
->num_levels
- 1].sclk
)
1548 pi
->highest_valid
= i
;
1550 if (pi
->lowest_valid
> pi
->highest_valid
) {
1551 if ((new_ps
->levels
[0].sclk
- table
->entries
[pi
->highest_valid
].clk
) >
1552 (table
->entries
[pi
->lowest_valid
].clk
- new_ps
->levels
[new_ps
->num_levels
- 1].sclk
))
1553 pi
->highest_valid
= pi
->lowest_valid
;
1555 pi
->lowest_valid
= pi
->highest_valid
;
1558 struct sumo_sclk_voltage_mapping_table
*table
=
1559 &pi
->sys_info
.sclk_voltage_mapping_table
;
1561 for (i
= 0; i
< (int)pi
->graphics_dpm_level_count
; i
++) {
1562 if (table
->entries
[i
].sclk_frequency
>= new_ps
->levels
[0].sclk
||
1563 i
== (int)(pi
->graphics_dpm_level_count
- 1)) {
1564 pi
->lowest_valid
= i
;
1569 for (i
= pi
->graphics_dpm_level_count
- 1; i
> 0; i
--) {
1570 if (table
->entries
[i
].sclk_frequency
<=
1571 new_ps
->levels
[new_ps
->num_levels
- 1].sclk
)
1574 pi
->highest_valid
= i
;
1576 if (pi
->lowest_valid
> pi
->highest_valid
) {
1577 if ((new_ps
->levels
[0].sclk
-
1578 table
->entries
[pi
->highest_valid
].sclk_frequency
) >
1579 (table
->entries
[pi
->lowest_valid
].sclk_frequency
-
1580 new_ps
->levels
[new_ps
->num_levels
-1].sclk
))
1581 pi
->highest_valid
= pi
->lowest_valid
;
1583 pi
->lowest_valid
= pi
->highest_valid
;
1588 static int kv_update_dfs_bypass_settings(struct radeon_device
*rdev
,
1589 struct radeon_ps
*new_rps
)
1591 struct kv_ps
*new_ps
= kv_get_ps(new_rps
);
1592 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1596 if (pi
->caps_enable_dfs_bypass
) {
1597 clk_bypass_cntl
= new_ps
->need_dfs_bypass
?
1598 pi
->graphics_level
[pi
->graphics_boot_level
].ClkBypassCntl
: 0;
1599 ret
= kv_copy_bytes_to_smc(rdev
,
1600 (pi
->dpm_table_start
+
1601 offsetof(SMU7_Fusion_DpmTable
, GraphicsLevel
) +
1602 (pi
->graphics_boot_level
* sizeof(SMU7_Fusion_GraphicsLevel
)) +
1603 offsetof(SMU7_Fusion_GraphicsLevel
, ClkBypassCntl
)),
1605 sizeof(u8
), pi
->sram_end
);
1611 static int kv_enable_nb_dpm(struct radeon_device
*rdev
,
1614 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1618 if (pi
->enable_nb_dpm
&& !pi
->nb_dpm_enabled
) {
1619 ret
= kv_notify_message_to_smu(rdev
, PPSMC_MSG_NBDPM_Enable
);
1621 pi
->nb_dpm_enabled
= true;
1624 if (pi
->enable_nb_dpm
&& pi
->nb_dpm_enabled
) {
1625 ret
= kv_notify_message_to_smu(rdev
, PPSMC_MSG_NBDPM_Disable
);
1627 pi
->nb_dpm_enabled
= false;
1634 int kv_dpm_force_performance_level(struct radeon_device
*rdev
,
1635 enum radeon_dpm_forced_level level
)
1639 if (level
== RADEON_DPM_FORCED_LEVEL_HIGH
) {
1640 ret
= kv_force_dpm_highest(rdev
);
1643 } else if (level
== RADEON_DPM_FORCED_LEVEL_LOW
) {
1644 ret
= kv_force_dpm_lowest(rdev
);
1647 } else if (level
== RADEON_DPM_FORCED_LEVEL_AUTO
) {
1648 ret
= kv_unforce_levels(rdev
);
1653 rdev
->pm
.dpm
.forced_level
= level
;
1658 int kv_dpm_pre_set_power_state(struct radeon_device
*rdev
)
1660 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1661 struct radeon_ps requested_ps
= *rdev
->pm
.dpm
.requested_ps
;
1662 struct radeon_ps
*new_ps
= &requested_ps
;
1664 kv_update_requested_ps(rdev
, new_ps
);
1666 kv_apply_state_adjust_rules(rdev
,
1673 int kv_dpm_set_power_state(struct radeon_device
*rdev
)
1675 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1676 struct radeon_ps
*new_ps
= &pi
->requested_rps
;
1677 struct radeon_ps
*old_ps
= &pi
->current_rps
;
1680 if (pi
->bapm_enable
) {
1681 ret
= kv_smc_bapm_enable(rdev
, rdev
->pm
.dpm
.ac_power
);
1683 DRM_ERROR("kv_smc_bapm_enable failed\n");
1688 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
) {
1689 if (pi
->enable_dpm
) {
1690 kv_set_valid_clock_range(rdev
, new_ps
);
1691 kv_update_dfs_bypass_settings(rdev
, new_ps
);
1692 ret
= kv_calculate_ds_divider(rdev
);
1694 DRM_ERROR("kv_calculate_ds_divider failed\n");
1697 kv_calculate_nbps_level_settings(rdev
);
1698 kv_calculate_dpm_settings(rdev
);
1699 kv_force_lowest_valid(rdev
);
1700 kv_enable_new_levels(rdev
);
1701 kv_upload_dpm_settings(rdev
);
1702 kv_program_nbps_index_settings(rdev
, new_ps
);
1703 kv_unforce_levels(rdev
);
1704 kv_set_enabled_levels(rdev
);
1705 kv_force_lowest_valid(rdev
);
1706 kv_unforce_levels(rdev
);
1708 ret
= kv_update_vce_dpm(rdev
, new_ps
, old_ps
);
1710 DRM_ERROR("kv_update_vce_dpm failed\n");
1713 kv_update_sclk_t(rdev
);
1714 if (rdev
->family
== CHIP_MULLINS
)
1715 kv_enable_nb_dpm(rdev
, true);
1718 if (pi
->enable_dpm
) {
1719 kv_set_valid_clock_range(rdev
, new_ps
);
1720 kv_update_dfs_bypass_settings(rdev
, new_ps
);
1721 ret
= kv_calculate_ds_divider(rdev
);
1723 DRM_ERROR("kv_calculate_ds_divider failed\n");
1726 kv_calculate_nbps_level_settings(rdev
);
1727 kv_calculate_dpm_settings(rdev
);
1728 kv_freeze_sclk_dpm(rdev
, true);
1729 kv_upload_dpm_settings(rdev
);
1730 kv_program_nbps_index_settings(rdev
, new_ps
);
1731 kv_freeze_sclk_dpm(rdev
, false);
1732 kv_set_enabled_levels(rdev
);
1733 ret
= kv_update_vce_dpm(rdev
, new_ps
, old_ps
);
1735 DRM_ERROR("kv_update_vce_dpm failed\n");
1738 kv_update_acp_boot_level(rdev
);
1739 kv_update_sclk_t(rdev
);
1740 kv_enable_nb_dpm(rdev
, true);
1747 void kv_dpm_post_set_power_state(struct radeon_device
*rdev
)
1749 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1750 struct radeon_ps
*new_ps
= &pi
->requested_rps
;
1752 kv_update_current_ps(rdev
, new_ps
);
1755 void kv_dpm_setup_asic(struct radeon_device
*rdev
)
1757 sumo_take_smu_control(rdev
, true);
1758 kv_init_powergate_state(rdev
);
1759 kv_init_sclk_t(rdev
);
1762 //XXX use sumo_dpm_display_configuration_changed
1764 static void kv_construct_max_power_limits_table(struct radeon_device
*rdev
,
1765 struct radeon_clock_and_voltage_limits
*table
)
1767 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1769 if (pi
->sys_info
.sclk_voltage_mapping_table
.num_max_dpm_entries
> 0) {
1770 int idx
= pi
->sys_info
.sclk_voltage_mapping_table
.num_max_dpm_entries
- 1;
1772 pi
->sys_info
.sclk_voltage_mapping_table
.entries
[idx
].sclk_frequency
;
1774 kv_convert_2bit_index_to_voltage(rdev
,
1775 pi
->sys_info
.sclk_voltage_mapping_table
.entries
[idx
].vid_2bit
);
1778 table
->mclk
= pi
->sys_info
.nbp_memory_clock
[0];
1781 static void kv_patch_voltage_values(struct radeon_device
*rdev
)
1784 struct radeon_uvd_clock_voltage_dependency_table
*uvd_table
=
1785 &rdev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
1786 struct radeon_vce_clock_voltage_dependency_table
*vce_table
=
1787 &rdev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
1788 struct radeon_clock_voltage_dependency_table
*samu_table
=
1789 &rdev
->pm
.dpm
.dyn_state
.samu_clock_voltage_dependency_table
;
1790 struct radeon_clock_voltage_dependency_table
*acp_table
=
1791 &rdev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
1793 if (uvd_table
->count
) {
1794 for (i
= 0; i
< uvd_table
->count
; i
++)
1795 uvd_table
->entries
[i
].v
=
1796 kv_convert_8bit_index_to_voltage(rdev
,
1797 uvd_table
->entries
[i
].v
);
1800 if (vce_table
->count
) {
1801 for (i
= 0; i
< vce_table
->count
; i
++)
1802 vce_table
->entries
[i
].v
=
1803 kv_convert_8bit_index_to_voltage(rdev
,
1804 vce_table
->entries
[i
].v
);
1807 if (samu_table
->count
) {
1808 for (i
= 0; i
< samu_table
->count
; i
++)
1809 samu_table
->entries
[i
].v
=
1810 kv_convert_8bit_index_to_voltage(rdev
,
1811 samu_table
->entries
[i
].v
);
1814 if (acp_table
->count
) {
1815 for (i
= 0; i
< acp_table
->count
; i
++)
1816 acp_table
->entries
[i
].v
=
1817 kv_convert_8bit_index_to_voltage(rdev
,
1818 acp_table
->entries
[i
].v
);
1823 static void kv_construct_boot_state(struct radeon_device
*rdev
)
1825 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1827 pi
->boot_pl
.sclk
= pi
->sys_info
.bootup_sclk
;
1828 pi
->boot_pl
.vddc_index
= pi
->sys_info
.bootup_nb_voltage_index
;
1829 pi
->boot_pl
.ds_divider_index
= 0;
1830 pi
->boot_pl
.ss_divider_index
= 0;
1831 pi
->boot_pl
.allow_gnb_slow
= 1;
1832 pi
->boot_pl
.force_nbp_state
= 0;
1833 pi
->boot_pl
.display_wm
= 0;
1834 pi
->boot_pl
.vce_wm
= 0;
1837 static int kv_force_dpm_highest(struct radeon_device
*rdev
)
1842 ret
= kv_dpm_get_enable_mask(rdev
, &enable_mask
);
1846 for (i
= SMU7_MAX_LEVELS_GRAPHICS
- 1; i
> 0; i
--) {
1847 if (enable_mask
& (1 << i
))
1851 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
)
1852 return kv_send_msg_to_smc_with_parameter(rdev
, PPSMC_MSG_DPM_ForceState
, i
);
1854 return kv_set_enabled_level(rdev
, i
);
1857 static int kv_force_dpm_lowest(struct radeon_device
*rdev
)
1862 ret
= kv_dpm_get_enable_mask(rdev
, &enable_mask
);
1866 for (i
= 0; i
< SMU7_MAX_LEVELS_GRAPHICS
; i
++) {
1867 if (enable_mask
& (1 << i
))
1871 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
)
1872 return kv_send_msg_to_smc_with_parameter(rdev
, PPSMC_MSG_DPM_ForceState
, i
);
1874 return kv_set_enabled_level(rdev
, i
);
1877 static u8
kv_get_sleep_divider_id_from_clock(struct radeon_device
*rdev
,
1878 u32 sclk
, u32 min_sclk_in_sr
)
1880 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1883 u32 min
= (min_sclk_in_sr
> KV_MINIMUM_ENGINE_CLOCK
) ?
1884 min_sclk_in_sr
: KV_MINIMUM_ENGINE_CLOCK
;
1889 if (!pi
->caps_sclk_ds
)
1892 for (i
= KV_MAX_DEEPSLEEP_DIVIDER_ID
; i
> 0; i
--) {
1893 temp
= sclk
/ sumo_get_sleep_divider_from_id(i
);
1901 static int kv_get_high_voltage_limit(struct radeon_device
*rdev
, int *limit
)
1903 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1904 struct radeon_clock_voltage_dependency_table
*table
=
1905 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
1908 if (table
&& table
->count
) {
1909 for (i
= table
->count
- 1; i
>= 0; i
--) {
1910 if (pi
->high_voltage_t
&&
1911 (kv_convert_8bit_index_to_voltage(rdev
, table
->entries
[i
].v
) <=
1912 pi
->high_voltage_t
)) {
1918 struct sumo_sclk_voltage_mapping_table
*table
=
1919 &pi
->sys_info
.sclk_voltage_mapping_table
;
1921 for (i
= table
->num_max_dpm_entries
- 1; i
>= 0; i
--) {
1922 if (pi
->high_voltage_t
&&
1923 (kv_convert_2bit_index_to_voltage(rdev
, table
->entries
[i
].vid_2bit
) <=
1924 pi
->high_voltage_t
)) {
1935 static void kv_apply_state_adjust_rules(struct radeon_device
*rdev
,
1936 struct radeon_ps
*new_rps
,
1937 struct radeon_ps
*old_rps
)
1939 struct kv_ps
*ps
= kv_get_ps(new_rps
);
1940 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1941 u32 min_sclk
= 10000; /* ??? */
1945 struct radeon_clock_voltage_dependency_table
*table
=
1946 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
1947 u32 stable_p_state_sclk
= 0;
1948 struct radeon_clock_and_voltage_limits
*max_limits
=
1949 &rdev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
1951 if (new_rps
->vce_active
) {
1952 new_rps
->evclk
= rdev
->pm
.dpm
.vce_states
[rdev
->pm
.dpm
.vce_level
].evclk
;
1953 new_rps
->ecclk
= rdev
->pm
.dpm
.vce_states
[rdev
->pm
.dpm
.vce_level
].ecclk
;
1959 mclk
= max_limits
->mclk
;
1962 if (pi
->caps_stable_p_state
) {
1963 stable_p_state_sclk
= (max_limits
->sclk
* 75) / 100;
1965 for (i
= table
->count
- 1; i
>= 0; i
--) {
1966 if (stable_p_state_sclk
>= table
->entries
[i
].clk
) {
1967 stable_p_state_sclk
= table
->entries
[i
].clk
;
1973 stable_p_state_sclk
= table
->entries
[0].clk
;
1975 sclk
= stable_p_state_sclk
;
1978 if (new_rps
->vce_active
) {
1979 if (sclk
< rdev
->pm
.dpm
.vce_states
[rdev
->pm
.dpm
.vce_level
].sclk
)
1980 sclk
= rdev
->pm
.dpm
.vce_states
[rdev
->pm
.dpm
.vce_level
].sclk
;
1983 ps
->need_dfs_bypass
= true;
1985 for (i
= 0; i
< ps
->num_levels
; i
++) {
1986 if (ps
->levels
[i
].sclk
< sclk
)
1987 ps
->levels
[i
].sclk
= sclk
;
1990 if (table
&& table
->count
) {
1991 for (i
= 0; i
< ps
->num_levels
; i
++) {
1992 if (pi
->high_voltage_t
&&
1993 (pi
->high_voltage_t
<
1994 kv_convert_8bit_index_to_voltage(rdev
, ps
->levels
[i
].vddc_index
))) {
1995 kv_get_high_voltage_limit(rdev
, &limit
);
1996 ps
->levels
[i
].sclk
= table
->entries
[limit
].clk
;
2000 struct sumo_sclk_voltage_mapping_table
*table
=
2001 &pi
->sys_info
.sclk_voltage_mapping_table
;
2003 for (i
= 0; i
< ps
->num_levels
; i
++) {
2004 if (pi
->high_voltage_t
&&
2005 (pi
->high_voltage_t
<
2006 kv_convert_8bit_index_to_voltage(rdev
, ps
->levels
[i
].vddc_index
))) {
2007 kv_get_high_voltage_limit(rdev
, &limit
);
2008 ps
->levels
[i
].sclk
= table
->entries
[limit
].sclk_frequency
;
2013 if (pi
->caps_stable_p_state
) {
2014 for (i
= 0; i
< ps
->num_levels
; i
++) {
2015 ps
->levels
[i
].sclk
= stable_p_state_sclk
;
2019 pi
->video_start
= new_rps
->dclk
|| new_rps
->vclk
||
2020 new_rps
->evclk
|| new_rps
->ecclk
;
2022 if ((new_rps
->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK
) ==
2023 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY
)
2024 pi
->battery_state
= true;
2026 pi
->battery_state
= false;
2028 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
) {
2029 ps
->dpm0_pg_nb_ps_lo
= 0x1;
2030 ps
->dpm0_pg_nb_ps_hi
= 0x0;
2031 ps
->dpmx_nb_ps_lo
= 0x1;
2032 ps
->dpmx_nb_ps_hi
= 0x0;
2034 ps
->dpm0_pg_nb_ps_lo
= 0x3;
2035 ps
->dpm0_pg_nb_ps_hi
= 0x0;
2036 ps
->dpmx_nb_ps_lo
= 0x3;
2037 ps
->dpmx_nb_ps_hi
= 0x0;
2039 if (pi
->sys_info
.nb_dpm_enable
) {
2040 force_high
= (mclk
>= pi
->sys_info
.nbp_memory_clock
[3]) ||
2041 pi
->video_start
|| (rdev
->pm
.dpm
.new_active_crtc_count
>= 3) ||
2042 pi
->disable_nb_ps3_in_battery
;
2043 ps
->dpm0_pg_nb_ps_lo
= force_high
? 0x2 : 0x3;
2044 ps
->dpm0_pg_nb_ps_hi
= 0x2;
2045 ps
->dpmx_nb_ps_lo
= force_high
? 0x2 : 0x3;
2046 ps
->dpmx_nb_ps_hi
= 0x2;
2051 static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device
*rdev
,
2052 u32 index
, bool enable
)
2054 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2056 pi
->graphics_level
[index
].EnabledForThrottle
= enable
? 1 : 0;
2059 static int kv_calculate_ds_divider(struct radeon_device
*rdev
)
2061 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2062 u32 sclk_in_sr
= 10000; /* ??? */
2065 if (pi
->lowest_valid
> pi
->highest_valid
)
2068 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++) {
2069 pi
->graphics_level
[i
].DeepSleepDivId
=
2070 kv_get_sleep_divider_id_from_clock(rdev
,
2071 be32_to_cpu(pi
->graphics_level
[i
].SclkFrequency
),
2077 static int kv_calculate_nbps_level_settings(struct radeon_device
*rdev
)
2079 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2082 struct radeon_clock_and_voltage_limits
*max_limits
=
2083 &rdev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
2084 u32 mclk
= max_limits
->mclk
;
2086 if (pi
->lowest_valid
> pi
->highest_valid
)
2089 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
) {
2090 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++) {
2091 pi
->graphics_level
[i
].GnbSlow
= 1;
2092 pi
->graphics_level
[i
].ForceNbPs1
= 0;
2093 pi
->graphics_level
[i
].UpH
= 0;
2096 if (!pi
->sys_info
.nb_dpm_enable
)
2099 force_high
= ((mclk
>= pi
->sys_info
.nbp_memory_clock
[3]) ||
2100 (rdev
->pm
.dpm
.new_active_crtc_count
>= 3) || pi
->video_start
);
2103 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++)
2104 pi
->graphics_level
[i
].GnbSlow
= 0;
2106 if (pi
->battery_state
)
2107 pi
->graphics_level
[0].ForceNbPs1
= 1;
2109 pi
->graphics_level
[1].GnbSlow
= 0;
2110 pi
->graphics_level
[2].GnbSlow
= 0;
2111 pi
->graphics_level
[3].GnbSlow
= 0;
2112 pi
->graphics_level
[4].GnbSlow
= 0;
2115 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++) {
2116 pi
->graphics_level
[i
].GnbSlow
= 1;
2117 pi
->graphics_level
[i
].ForceNbPs1
= 0;
2118 pi
->graphics_level
[i
].UpH
= 0;
2121 if (pi
->sys_info
.nb_dpm_enable
&& pi
->battery_state
) {
2122 pi
->graphics_level
[pi
->lowest_valid
].UpH
= 0x28;
2123 pi
->graphics_level
[pi
->lowest_valid
].GnbSlow
= 0;
2124 if (pi
->lowest_valid
!= pi
->highest_valid
)
2125 pi
->graphics_level
[pi
->lowest_valid
].ForceNbPs1
= 1;
2131 static int kv_calculate_dpm_settings(struct radeon_device
*rdev
)
2133 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2136 if (pi
->lowest_valid
> pi
->highest_valid
)
2139 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++)
2140 pi
->graphics_level
[i
].DisplayWatermark
= (i
== pi
->highest_valid
) ? 1 : 0;
2145 static void kv_init_graphics_levels(struct radeon_device
*rdev
)
2147 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2149 struct radeon_clock_voltage_dependency_table
*table
=
2150 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
2152 if (table
&& table
->count
) {
2155 pi
->graphics_dpm_level_count
= 0;
2156 for (i
= 0; i
< table
->count
; i
++) {
2157 if (pi
->high_voltage_t
&&
2158 (pi
->high_voltage_t
<
2159 kv_convert_8bit_index_to_voltage(rdev
, table
->entries
[i
].v
)))
2162 kv_set_divider_value(rdev
, i
, table
->entries
[i
].clk
);
2163 vid_2bit
= kv_convert_vid7_to_vid2(rdev
,
2164 &pi
->sys_info
.vid_mapping_table
,
2165 table
->entries
[i
].v
);
2166 kv_set_vid(rdev
, i
, vid_2bit
);
2167 kv_set_at(rdev
, i
, pi
->at
[i
]);
2168 kv_dpm_power_level_enabled_for_throttle(rdev
, i
, true);
2169 pi
->graphics_dpm_level_count
++;
2172 struct sumo_sclk_voltage_mapping_table
*table
=
2173 &pi
->sys_info
.sclk_voltage_mapping_table
;
2175 pi
->graphics_dpm_level_count
= 0;
2176 for (i
= 0; i
< table
->num_max_dpm_entries
; i
++) {
2177 if (pi
->high_voltage_t
&&
2178 pi
->high_voltage_t
<
2179 kv_convert_2bit_index_to_voltage(rdev
, table
->entries
[i
].vid_2bit
))
2182 kv_set_divider_value(rdev
, i
, table
->entries
[i
].sclk_frequency
);
2183 kv_set_vid(rdev
, i
, table
->entries
[i
].vid_2bit
);
2184 kv_set_at(rdev
, i
, pi
->at
[i
]);
2185 kv_dpm_power_level_enabled_for_throttle(rdev
, i
, true);
2186 pi
->graphics_dpm_level_count
++;
2190 for (i
= 0; i
< SMU7_MAX_LEVELS_GRAPHICS
; i
++)
2191 kv_dpm_power_level_enable(rdev
, i
, false);
2194 static void kv_enable_new_levels(struct radeon_device
*rdev
)
2196 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2199 for (i
= 0; i
< SMU7_MAX_LEVELS_GRAPHICS
; i
++) {
2200 if (i
>= pi
->lowest_valid
&& i
<= pi
->highest_valid
)
2201 kv_dpm_power_level_enable(rdev
, i
, true);
2205 static int kv_set_enabled_level(struct radeon_device
*rdev
, u32 level
)
2207 u32 new_mask
= (1 << level
);
2209 return kv_send_msg_to_smc_with_parameter(rdev
,
2210 PPSMC_MSG_SCLKDPM_SetEnabledMask
,
2214 static int kv_set_enabled_levels(struct radeon_device
*rdev
)
2216 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2217 u32 i
, new_mask
= 0;
2219 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++)
2220 new_mask
|= (1 << i
);
2222 return kv_send_msg_to_smc_with_parameter(rdev
,
2223 PPSMC_MSG_SCLKDPM_SetEnabledMask
,
2227 static void kv_program_nbps_index_settings(struct radeon_device
*rdev
,
2228 struct radeon_ps
*new_rps
)
2230 struct kv_ps
*new_ps
= kv_get_ps(new_rps
);
2231 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2234 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
)
2237 if (pi
->sys_info
.nb_dpm_enable
) {
2238 nbdpmconfig1
= RREG32_SMC(NB_DPM_CONFIG_1
);
2239 nbdpmconfig1
&= ~(Dpm0PgNbPsLo_MASK
| Dpm0PgNbPsHi_MASK
|
2240 DpmXNbPsLo_MASK
| DpmXNbPsHi_MASK
);
2241 nbdpmconfig1
|= (Dpm0PgNbPsLo(new_ps
->dpm0_pg_nb_ps_lo
) |
2242 Dpm0PgNbPsHi(new_ps
->dpm0_pg_nb_ps_hi
) |
2243 DpmXNbPsLo(new_ps
->dpmx_nb_ps_lo
) |
2244 DpmXNbPsHi(new_ps
->dpmx_nb_ps_hi
));
2245 WREG32_SMC(NB_DPM_CONFIG_1
, nbdpmconfig1
);
2249 static int kv_set_thermal_temperature_range(struct radeon_device
*rdev
,
2250 int min_temp
, int max_temp
)
2252 int low_temp
= 0 * 1000;
2253 int high_temp
= 255 * 1000;
2256 if (low_temp
< min_temp
)
2257 low_temp
= min_temp
;
2258 if (high_temp
> max_temp
)
2259 high_temp
= max_temp
;
2260 if (high_temp
< low_temp
) {
2261 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp
, high_temp
);
2265 tmp
= RREG32_SMC(CG_THERMAL_INT_CTRL
);
2266 tmp
&= ~(DIG_THERM_INTH_MASK
| DIG_THERM_INTL_MASK
);
2267 tmp
|= (DIG_THERM_INTH(49 + (high_temp
/ 1000)) |
2268 DIG_THERM_INTL(49 + (low_temp
/ 1000)));
2269 WREG32_SMC(CG_THERMAL_INT_CTRL
, tmp
);
2271 rdev
->pm
.dpm
.thermal
.min_temp
= low_temp
;
2272 rdev
->pm
.dpm
.thermal
.max_temp
= high_temp
;
2278 struct _ATOM_INTEGRATED_SYSTEM_INFO info
;
2279 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2
;
2280 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5
;
2281 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6
;
2282 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7
;
2283 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8
;
2286 static int kv_parse_sys_info_table(struct radeon_device
*rdev
)
2288 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2289 struct radeon_mode_info
*mode_info
= &rdev
->mode_info
;
2290 int index
= GetIndexIntoMasterTable(DATA
, IntegratedSystemInfo
);
2291 union igp_info
*igp_info
;
2296 if (atom_parse_data_header(mode_info
->atom_context
, index
, NULL
,
2297 &frev
, &crev
, &data_offset
)) {
2298 igp_info
= (union igp_info
*)(mode_info
->atom_context
->bios
+
2302 DRM_ERROR("Unsupported IGP table: %d %d\n", frev
, crev
);
2305 pi
->sys_info
.bootup_sclk
= le32_to_cpu(igp_info
->info_8
.ulBootUpEngineClock
);
2306 pi
->sys_info
.bootup_uma_clk
= le32_to_cpu(igp_info
->info_8
.ulBootUpUMAClock
);
2307 pi
->sys_info
.bootup_nb_voltage_index
=
2308 le16_to_cpu(igp_info
->info_8
.usBootUpNBVoltage
);
2309 if (igp_info
->info_8
.ucHtcTmpLmt
== 0)
2310 pi
->sys_info
.htc_tmp_lmt
= 203;
2312 pi
->sys_info
.htc_tmp_lmt
= igp_info
->info_8
.ucHtcTmpLmt
;
2313 if (igp_info
->info_8
.ucHtcHystLmt
== 0)
2314 pi
->sys_info
.htc_hyst_lmt
= 5;
2316 pi
->sys_info
.htc_hyst_lmt
= igp_info
->info_8
.ucHtcHystLmt
;
2317 if (pi
->sys_info
.htc_tmp_lmt
<= pi
->sys_info
.htc_hyst_lmt
) {
2318 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
2321 if (le32_to_cpu(igp_info
->info_8
.ulSystemConfig
) & (1 << 3))
2322 pi
->sys_info
.nb_dpm_enable
= true;
2324 pi
->sys_info
.nb_dpm_enable
= false;
2326 for (i
= 0; i
< KV_NUM_NBPSTATES
; i
++) {
2327 pi
->sys_info
.nbp_memory_clock
[i
] =
2328 le32_to_cpu(igp_info
->info_8
.ulNbpStateMemclkFreq
[i
]);
2329 pi
->sys_info
.nbp_n_clock
[i
] =
2330 le32_to_cpu(igp_info
->info_8
.ulNbpStateNClkFreq
[i
]);
2332 if (le32_to_cpu(igp_info
->info_8
.ulGPUCapInfo
) &
2333 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS
)
2334 pi
->caps_enable_dfs_bypass
= true;
2336 sumo_construct_sclk_voltage_mapping_table(rdev
,
2337 &pi
->sys_info
.sclk_voltage_mapping_table
,
2338 igp_info
->info_8
.sAvail_SCLK
);
2340 sumo_construct_vid_mapping_table(rdev
,
2341 &pi
->sys_info
.vid_mapping_table
,
2342 igp_info
->info_8
.sAvail_SCLK
);
2344 kv_construct_max_power_limits_table(rdev
,
2345 &rdev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
);
2351 struct _ATOM_POWERPLAY_INFO info
;
2352 struct _ATOM_POWERPLAY_INFO_V2 info_2
;
2353 struct _ATOM_POWERPLAY_INFO_V3 info_3
;
2354 struct _ATOM_PPLIB_POWERPLAYTABLE pplib
;
2355 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2
;
2356 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3
;
2359 union pplib_clock_info
{
2360 struct _ATOM_PPLIB_R600_CLOCK_INFO r600
;
2361 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780
;
2362 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen
;
2363 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo
;
2366 union pplib_power_state
{
2367 struct _ATOM_PPLIB_STATE v1
;
2368 struct _ATOM_PPLIB_STATE_V2 v2
;
2371 static void kv_patch_boot_state(struct radeon_device
*rdev
,
2374 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2377 ps
->levels
[0] = pi
->boot_pl
;
/*
 * Fill in the generic (non-clock) fields of a radeon power state from a
 * PPLib non-clock info entry: capability flags, classification and,
 * where the table revision provides them, the UVD vclk/dclk.  States
 * classified as boot or UVD states are additionally registered in
 * rdev->pm.dpm so the rest of the driver can find them.
 */
static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	/* UVD clocks only exist in table revisions newer than VER1 */
	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		/* pin the boot state to the recorded boot power level */
		kv_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
/*
 * Decode one PPLib clock info entry (SUMO layout) into performance
 * level @index of the power state, and grow the state's level count to
 * cover it.  When deep-sleep sclk scaling is supported, both divider
 * indices are set to the maximum deep-sleep divider ID.
 */
static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	/* engine clock is split into a 16-bit low and 8-bit high field */
	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	/* levels are parsed in order, so index + 1 is the current count */
	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}
/*
 * Walk the PPLib PowerPlay table in the VBIOS and build the driver's
 * array of radeon power states (rdev->pm.dpm.ps), allocating a kv_ps
 * private struct per state and decoding each state's DPM levels.
 * Finally patches the VCE state table with the engine clocks referenced
 * by each VCE state's clock index.
 *
 * Returns 0 on success, -EINVAL if the table header or clock info is
 * malformed, -ENOMEM on allocation failure.
 */
static int kv_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* the three sub-arrays are located via offsets in the pplib header */
	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct radeon_ps),
				  GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info)
			return -EINVAL;
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			/* free the state array so the caller doesn't leak it */
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			/* skip indices that point outside the clock info array */
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			/* cap at the number of hardware power levels */
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		/* entries are variable size: 2-byte header + one index per level */
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		/* KV has no discrete memory clock; UMA only */
		rdev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}
/*
 * One-time DPM setup for KV/KB: allocate the private kv_power_info,
 * parse the platform/extended/system-info/power tables, and select the
 * default feature caps (power containment, CAC, sclk deep sleep, BAPM
 * policy via the radeon_bapm module parameter, power gating, etc.).
 *
 * Returns 0 on success or a negative error code from table parsing.
 */
int kv_dpm_init(struct radeon_device *rdev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = r600_get_platform_caps(rdev);
	if (ret)
		return ret;

	ret = r600_parse_extended_power_table(rdev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	/* Enabling nb dpm on an asrock system prevents dpm from working */
	if (rdev->pdev->subsystem_vendor == 0x1849)
		pi->enable_nb_dpm = false;
	else
		pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	pi->caps_sclk_ds = true;
	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	if (radeon_bapm == -1) {
		/* only enable bapm on KB, ML by default */
		if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
			pi->bapm_enable = true;
		else
			pi->bapm_enable = false;
	} else if (radeon_bapm == 0) {
		pi->bapm_enable = false;
	} else {
		pi->bapm_enable = true;
	}
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = true;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = false; /* XXX true */
	pi->caps_samu_pg = false;
	pi->caps_acp_pg = false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	kv_patch_voltage_values(rdev);
	kv_construct_boot_state(rdev);

	ret = kv_parse_power_table(rdev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}
/*
 * Debugfs helper: print the currently selected sclk DPM level, its
 * engine clock and voltage, plus the UVD/VCE power-gating status, by
 * reading the SMC's current-profile and voltage-status registers.
 */
void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		/* graphics_level[] is stored SMC-side, hence big endian */
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
		seq_printf(m, "uvd    %sabled\n", pi->uvd_power_gated ? "dis" : "en");
		seq_printf(m, "vce    %sabled\n", pi->vce_power_gated ? "dis" : "en");
		seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}
/*
 * Read back the engine clock of the sclk DPM level the SMC is
 * currently running; returns 0 if the reported index is out of range.
 */
u32 kv_dpm_get_current_sclk(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		return 0;
	} else {
		/* graphics_level[] is stored SMC-side, hence big endian */
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		return sclk;
	}
}
2638 u32
kv_dpm_get_current_mclk(struct radeon_device
*rdev
)
2640 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2642 return pi
->sys_info
.bootup_uma_clk
;
/*
 * Dump a power state to the kernel log: classification, capability
 * flags, UVD clocks, and every performance level's sclk/vddc.
 */
void kv_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	int i;
	struct kv_ps *ps = kv_get_ps(rps);

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d    sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
	}
	r600_dpm_print_ps_status(rdev, rps);
}
2663 void kv_dpm_fini(struct radeon_device
*rdev
)
2667 for (i
= 0; i
< rdev
->pm
.dpm
.num_ps
; i
++) {
2668 kfree(rdev
->pm
.dpm
.ps
[i
].ps_priv
);
2670 kfree(rdev
->pm
.dpm
.ps
);
2671 kfree(rdev
->pm
.dpm
.priv
);
2672 r600_free_extended_power_table(rdev
);
/* Nothing to do on KV/KB when the display configuration changes. */
void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
{
}
2680 u32
kv_dpm_get_sclk(struct radeon_device
*rdev
, bool low
)
2682 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2683 struct kv_ps
*requested_state
= kv_get_ps(&pi
->requested_rps
);
2686 return requested_state
->levels
[0].sclk
;
2688 return requested_state
->levels
[requested_state
->num_levels
- 1].sclk
;
2691 u32
kv_dpm_get_mclk(struct radeon_device
*rdev
, bool low
)
2693 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2695 return pi
->sys_info
.bootup_uma_clk
;