/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include "hwmgr.h"
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},

	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};
static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;
		if (adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	if (is_support_sw_smu(adev))
		ret = smu_read_sensor(&adev->smu, sensor, data, size);
	else {
		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
			ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
								    sensor, data, size);
		else
			ret = -EINVAL;
	}

	return ret;
}
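/*
 * Illustrative caller sketch (not part of the original file): this mirrors
 * how the sysfs handlers later in this file use amdgpu_dpm_read_sensor(),
 * e.g. to read the GPU load sensor into a plain int:
 *
 *	int value, size = sizeof(value);
 *	int r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
 *				       (void *)&value, &size);
 *	if (!r)
 *		pr_debug("GPU load: %d%%\n", value);
 */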
/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters.  The file power_dpm_state is used for this.
 * It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation.  Selecting battery switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation.  Selecting balanced switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation.  Selecting performance switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 */
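/*
 * Example (illustrative; the exact card index depends on the system):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/power_dpm_state
 *	echo balanced > /sys/class/drm/card0/device/power_dpm_state
 */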
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		if (adev->smu.ppt_funcs->get_current_power_state)
			pm = smu_get_current_power_state(&adev->smu);
		else
			pm = adev->pm.dpm.user_state;
	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else {
		pm = adev->pm.dpm.user_state;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);
	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters.  The file power_dpm_force_performance_level is
 * used for this.  It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific work loads where you do
 * not want clock or power gating for clock fluctuation to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic.  profile_min_sclk forces the sclk
 * to the lowest level.  profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
 *
 */
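/*
 * Example (illustrative): switch to manual mode so that the pp_dpm_* files
 * accept writes, then return to automatic selection:
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo auto > /sys/class/drm/card0/device/power_dpm_force_performance_level
 */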
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			"unknown");
}
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		current_level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	/* profile_exit setting is valid only when current mode is in profile mode */
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_force_performance_level(&adev->smu, level);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		} else {
			adev->pm.dpm.forced_level = level;
		}
		mutex_unlock(&adev->pm.mutex);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len, ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_get_power_num_states(&adev->smu, &data);
		if (ret)
			return ret;
	} else if (adev->powerplay.pp_funcs->get_pp_num_states)
		amdgpu_dpm_get_pp_num_states(adev, &data);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}
static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	struct smu_context *smu = &adev->smu;
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		pm = smu_get_current_power_state(smu);
		ret = smu_get_power_num_states(smu, &data);
		if (ret)
			return ret;
	} else if (adev->powerplay.pp_funcs->get_current_power_state
		 && adev->powerplay.pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}
static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
		 adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0)
			return ret;

		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}
/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables.  The file pp_table is used for this.  Reading the file
 * will dump the current power play table.  Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
 *
 */
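/*
 * Example (illustrative): dump the active table, modify a copy with an
 * external tool, and upload it.  The table is binary, so plain redirects
 * are used in both directions:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table.bin
 *	# ... edit /tmp/pp_table.bin with an external tool ...
 *	cat /tmp/pp_table.bin > /sys/class/drm/card0/device/pp_table
 */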
static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size, ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else if (adev->powerplay.pp_funcs->get_pp_table) {
		size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return 0;
	}

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}
static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->set_pp_table)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state.  The pp_od_clk_voltage file is
 * used for this.
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level.  Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV.  When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes.  If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 and later ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - maximum memory clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve.
 *
 * - a list of valid ranges for sclk, mclk, and voltage curve points
 *   labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file.  The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock.  E.g., "s 0 500" will update the minimum sclk to 500 MHz and
 *   "m 1 800" will update the maximum mclk to 800 MHz.
 *
 *   For the sclk voltage curve, enter the new values by writing a
 *   string that contains "vc point clock voltage" to the file.  The
 *   points are indexed by 0, 1 and 2.  E.g., "vc 0 300 600" will
 *   update point 0 with a clock of 300 MHz and a voltage of
 *   600 mV, and "vc 2 1000 1000" will update point 2 with a clock of
 *   1000 MHz and a voltage of 1000 mV.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
 *
 */
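/*
 * Example (illustrative, pre-Vega20 syntax): raise sclk level 1 to 500 MHz
 * at 820 mV, then commit:
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo "s 1 500 820" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage
 */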
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	if (count > 127)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count+1);

	tmp_str = buf_cpy;

	if (type == PP_OD_EDIT_VDDC_CURVE)
		tmp_str++;
	while (isspace(*++tmp_str));

	while (tmp_str[0]) {
		sub_str = strsep(&tmp_str, delimiter);
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_od_edit_dpm_table(&adev->smu, type,
					    parameter, parameter_size);

		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else {
		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
						parameter, parameter_size);
			if (ret) {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}

		if (type == PP_OD_COMMIT_DPM_TABLE) {
			if (adev->powerplay.pp_funcs->dispatch_tasks) {
				amdgpu_dpm_dispatch_task(adev,
						AMD_PP_TASK_READJUST_POWER_STATE,
						NULL);
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return count;
			} else {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled.  The file pp_features is used for this, and it
 * is only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - Current ppfeature masks
 * - List of all the supported powerplay features with their naming,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit in the original ppfeature mask and write the
 * new ppfeature mask back.
 */
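/*
 * Example (illustrative; the mask value shown is hypothetical): read the
 * current feature list and mask, then write a modified mask back:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x3fffffff > /sys/class/drm/card0/device/pp_features
 */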
static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t featuremask;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	pr_debug("featuremask = 0x%llx\n", featuremask);

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_ppfeature_status)
		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by writing a space-separated
 * list of level indices to the file, as in "echo xx xx xx > pp_dpm_sclk/mclk/pcie".
 * E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not currently supported.
 */
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX	(32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while (tmp[0]) {
		sub_str = strsep(&tmp, delimiter);
		if (strlen(sub_str)) {
			ret = kstrtol(sub_str, 0, &level);
			if (ret)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}
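/*
 * Worked example (illustrative): for a write of "4 5 6", amdgpu_read_mask()
 * above yields mask = (1 << 4) | (1 << 5) | (1 << 6) = 0x70, i.e. only DPM
 * levels 4-6 remain enabled in the clock domain being written.
 */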
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
	else if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = kstrtol(buf, 0, &value);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_sclk_od)
			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
	else if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = kstrtol(buf, 0, &value);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_mclk_od)
			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state.  The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level.  Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics.  To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter.  Due to differences across asic families
 * the heuristic parameters vary from family to family.
 *
 */
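/*
 * Example (illustrative; profile numbers vary by asic): list the available
 * profiles, then select one by its index:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_power_profile_mode
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo 2 > /sys/class/drm/card0/device/pp_power_profile_mode
 */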
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_get_power_profile_mode(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_power_profile_mode)
		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while (tmp_str[0]) {
			sub_str = strsep(&tmp_str, delimiter);
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}
/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage.  The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
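/*
 * Example (illustrative): poll the GPU load once per second:
 *
 * .. code-block:: bash
 *
 *	watch -n 1 cat /sys/class/drm/card0/device/gpu_busy_percent
 */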
static ssize_t amdgpu_get_busy_percent(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0)
		return r;

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage.  The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0)
		return r;

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The Perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
 */
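/*
 * Example (illustrative): the file returns "count0 count1 mps"; a rough
 * upper bound on the last second's traffic in bytes is
 * (count0 + count1) * mps, since each counted message carries at most mps
 * bytes of payload:
 *
 * .. code-block:: bash
 *
 *	read c0 c1 mps < /sys/class/drm/card0/device/pcie_bw
 *	echo $(( (c0 + c1) * mps ))	# estimated bytes in the last second
 */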
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t count0, count1;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
			count0, count1, pcie_get_mps(adev->pdev));
}
/**
 * DOC: unique_id
 *
 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
 * The file unique_id is used for this.
 * This will provide a unique ID that will persist from machine to machine.
 *
 * NOTE: This will only work for GFX9 and newer.  This file will be absent
 * on unsupported ASICs (GFX8 and older).
 */
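/*
 * Example (illustrative):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/unique_id
 */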
static ssize_t amdgpu_get_unique_id(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	if (adev->unique_id)
		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);

	return 0;
}
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_force_state,
		amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_table,
		amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_sclk,
		amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_mclk,
		amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_socclk,
		amdgpu_set_pp_dpm_socclk);
static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_fclk,
		amdgpu_set_pp_dpm_fclk);
static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_dcefclk,
		amdgpu_set_pp_dpm_dcefclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_pcie,
		amdgpu_set_pp_dpm_pcie);
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_sclk_od,
		amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_mclk_od,
		amdgpu_set_pp_mclk_od);
static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_power_profile_mode,
		amdgpu_set_pp_power_profile_mode);
static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_od_clk_voltage,
		amdgpu_set_pp_od_clk_voltage);
static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
		amdgpu_get_busy_percent, NULL);
static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
		amdgpu_get_memory_busy_percent, NULL);
static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_feature_status,
		amdgpu_set_pp_feature_status);
static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int r, temp = 0, size = sizeof(temp);

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		/* get current junction temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_EDGE:
		/* get current edge temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_MEM:
		/* get current memory temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
					   (void *)&temp, &size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_hotspot_temp;
	else
		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_mem_temp;
	else
		temp = adev->pm.dpm.thermal.max_mem_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	int channel = to_sensor_dev_attr(attr)->index;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
}
static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int temp = 0;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
		break;
	case PP_TEMP_EDGE:
		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
		break;
	case PP_TEMP_MEM:
		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
	int ret;

	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	} else {
		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			return -EINVAL;
		}

		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return sprintf(buf, "%i\n", pwm_mode);
}
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err, ret;
	int value;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		smu_set_fan_control_mode(&adev->smu, value);
	} else {
		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			return -EINVAL;
		}

		amdgpu_dpm_set_fan_control_mode(adev, value);
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return count;
}
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	else
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pr_info("manual fan speed control should be enabled first\n");
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return -EINVAL;
	}

	err = kstrtou32(buf, 10, &value);
	if (err) {
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return err;
	}

	value = (value * 100) / 255;

	if (is_support_sw_smu(adev))
		err = smu_set_fan_speed_percent(&adev->smu, value);
	else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return count;
}
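/*
 * Example (illustrative; hwmonX is the amdgpu hwmon instance on the system):
 * enable manual fan control, then request roughly 50% duty cycle.  The
 * 0-255 pwm value is rescaled to a percentage by amdgpu_hwmon_set_pwm1()
 * above:
 *
 * .. code-block:: bash
 *
 *	echo 1 > /sys/class/hwmon/hwmonX/pwm1_enable
 *	echo 128 > /sys/class/hwmon/hwmonX/pwm1
 */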
static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_percent(&adev->smu, &speed);
	else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_rpm(&adev->smu, &speed);
	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return sprintf(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 min_rpm = 0;
	u32 size = sizeof(min_rpm);
	int r;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
				   (void *)&min_rpm, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
}
static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 max_rpm = 0;
	u32 size = sizeof(max_rpm);
	int r;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
				   (void *)&max_rpm, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
}
static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 rpm = 0;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return sprintf(buf, "%i\n", rpm);
}
static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        u32 value;
        u32 pwm_mode;

        err = pm_runtime_get_sync(adev->ddev->dev);
        if (err < 0)
                return err;

        if (is_support_sw_smu(adev))
                pwm_mode = smu_get_fan_control_mode(&adev->smu);
        else
                pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

        if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
                pm_runtime_mark_last_busy(adev->ddev->dev);
                pm_runtime_put_autosuspend(adev->ddev->dev);
                return -ENODATA;
        }

        err = kstrtou32(buf, 10, &value);
        if (err) {
                pm_runtime_mark_last_busy(adev->ddev->dev);
                pm_runtime_put_autosuspend(adev->ddev->dev);
                return err;
        }

        if (is_support_sw_smu(adev))
                err = smu_set_fan_speed_rpm(&adev->smu, value);
        else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
                err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
        else
                err = -EINVAL;

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);

        if (err)
                return err;

        return count;
}

static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 pwm_mode = 0;
        int ret;

        ret = pm_runtime_get_sync(adev->ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev)) {
                pwm_mode = smu_get_fan_control_mode(&adev->smu);
        } else {
                if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
                        return -EINVAL;
                }

                pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
        }

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);

        return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}

static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf,
                                            size_t count)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        int value;
        u32 pwm_mode;

        err = kstrtoint(buf, 10, &value);
        if (err)
                return err;

        if (value == 0)
                pwm_mode = AMD_FAN_CTRL_AUTO;
        else if (value == 1)
                pwm_mode = AMD_FAN_CTRL_MANUAL;
        else
                return -EINVAL;

        err = pm_runtime_get_sync(adev->ddev->dev);
        if (err < 0)
                return err;

        if (is_support_sw_smu(adev)) {
                smu_set_fan_control_mode(&adev->smu, pwm_mode);
        } else {
                if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
                        return -EINVAL;
                }

                amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
        }

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);

        return count;
}

static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 vddgfx;
        int r, size = sizeof(vddgfx);

        r = pm_runtime_get_sync(adev->ddev->dev);
        if (r < 0)
                return r;

        /* get the voltage */
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
                                   (void *)&vddgfx, &size);

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);

        if (r)
                return r;

        return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
}

static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
{
        return snprintf(buf, PAGE_SIZE, "vddgfx\n");
}

static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 vddnb;
        int r, size = sizeof(vddnb);

        /* only APUs have vddnb */
        if (!(adev->flags & AMD_IS_APU))
                return -EINVAL;

        r = pm_runtime_get_sync(adev->ddev->dev);
        if (r < 0)
                return r;

        /* get the voltage */
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
                                   (void *)&vddnb, &size);

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);

        if (r)
                return r;

        return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
}

static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
                                             struct device_attribute *attr,
                                             char *buf)
{
        return snprintf(buf, PAGE_SIZE, "vddnb\n");
}

static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 query = 0;
        int r, size = sizeof(u32);
        unsigned uw;

        r = pm_runtime_get_sync(adev->ddev->dev);
        if (r < 0)
                return r;

        /* get the average power */
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
                                   (void *)&query, &size);

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);

        if (r)
                return r;

        /* convert to microwatts */
        uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;

        return snprintf(buf, PAGE_SIZE, "%u\n", uw);
}

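/*
 * A worked example of the conversion above (illustrative only): the
 * sensor packs watts into a fixed-point value with the integer part in
 * the upper bits and a low byte that this formula scales as milliwatts.
 * For query = 0x2a80: (0x2a * 1000000) + (0x80 * 1000) = 42128000 uW,
 * i.e. about 42.1 W reported through power1_average.
 */
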
static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
                                               struct device_attribute *attr,
                                               char *buf)
{
        return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
                                               struct device_attribute *attr,
                                               char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        uint32_t limit = 0;
        ssize_t size;
        int r;

        r = pm_runtime_get_sync(adev->ddev->dev);
        if (r < 0)
                return r;

        if (is_support_sw_smu(adev)) {
                smu_get_power_limit(&adev->smu, &limit, true, true);
                size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
        } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
                adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
                size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
        } else {
                size = snprintf(buf, PAGE_SIZE, "\n");
        }

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);

        return size;
}

static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        uint32_t limit = 0;
        ssize_t size;
        int r;

        r = pm_runtime_get_sync(adev->ddev->dev);
        if (r < 0)
                return r;

        if (is_support_sw_smu(adev)) {
                smu_get_power_limit(&adev->smu, &limit, false, true);
                size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
        } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
                adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
                size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
        } else {
                size = snprintf(buf, PAGE_SIZE, "\n");
        }

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);

        return size;
}

static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf,
                                          size_t count)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        u32 value;

        if (amdgpu_sriov_vf(adev))
                return -EINVAL;

        err = kstrtou32(buf, 10, &value);
        if (err)
                return err;

        value = value / 1000000; /* convert to Watt */

        err = pm_runtime_get_sync(adev->ddev->dev);
        if (err < 0)
                return err;

        if (is_support_sw_smu(adev))
                err = smu_set_power_limit(&adev->smu, value);
        else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
                err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
        else
                err = -EINVAL;

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);

        if (err)
                return err;

        return count;
}

static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        uint32_t sclk;
        int r, size = sizeof(sclk);

        r = pm_runtime_get_sync(adev->ddev->dev);
        if (r < 0)
                return r;

        /* get the sclk */
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
                                   (void *)&sclk, &size);

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);

        if (r)
                return r;

        return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
}

static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        return snprintf(buf, PAGE_SIZE, "sclk\n");
}

static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        uint32_t mclk;
        int r, size = sizeof(mclk);

        r = pm_runtime_get_sync(adev->ddev->dev);
        if (r < 0)
                return r;

        /* get the mclk */
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
                                   (void *)&mclk, &size);

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);

        if (r)
                return r;

        return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
}

static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        return snprintf(buf, PAGE_SIZE, "mclk\n");
}

/**
 * DOC: hwmon
 *
 * The amdgpu driver exposes the following sensor interfaces:
 *
 * - GPU temperature (via the on-die sensor)
 *
 * - GPU voltage
 *
 * - Northbridge voltage (APUs only)
 *
 * - GPU power
 *
 * - GPU fan
 *
 * - GPU gfx/compute engine clock
 *
 * - GPU memory clock (dGPU only)
 *
 * hwmon interfaces for GPU temperature:
 *
 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
 *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_label: temperature channel label
 *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
 *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
 *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_emergency: temperature emergency max value (asic shutdown) in millidegrees Celsius
 *   - these are supported on SOC15 dGPUs only
 *
 * hwmon interfaces for GPU voltage:
 *
 * - in0_input: the voltage on the GPU in millivolts
 *
 * - in1_input: the voltage on the Northbridge in millivolts
 *
 * hwmon interfaces for GPU power:
 *
 * - power1_average: average power used by the GPU in microWatts
 *
 * - power1_cap_min: minimum cap supported in microWatts
 *
 * - power1_cap_max: maximum cap supported in microWatts
 *
 * - power1_cap: selected power cap in microWatts
 *
 * hwmon interfaces for GPU fan:
 *
 * - pwm1: pulse width modulation fan level (0-255)
 *
 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
 *
 * - pwm1_min: pulse width modulation fan control minimum level (0)
 *
 * - pwm1_max: pulse width modulation fan control maximum level (255)
 *
 * - fan1_min: minimum supported fan speed, in RPM
 *
 * - fan1_max: maximum supported fan speed, in RPM
 *
 * - fan1_input: fan speed in RPM
 *
 * - fan[1-\*]_target: desired fan speed, in RPM
 *
 * - fan[1-\*]_enable: enable or disable the sensor (1: enable, 0: disable)
 *
 * hwmon interfaces for GPU clocks:
 *
 * - freq1_input: the gfx/compute clock in hertz
 *
 * - freq2_input: the memory clock in hertz
 *
 * You can use hwmon tools like sensors to view this information on your system.
 *
 */

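/*
 * Example userspace usage (illustrative only -- the hwmon instance
 * number and the values shown are hypothetical and vary per system):
 *
 *      cat /sys/class/hwmon/hwmon0/temp1_input        (e.g. 45000 = 45.0 C)
 *      echo 1 > /sys/class/hwmon/hwmon0/pwm1_enable   (manual fan control)
 *      echo 128 > /sys/class/hwmon/hwmon0/pwm1        (~50% fan speed)
 */
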
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);

static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
        &sensor_dev_attr_temp2_input.dev_attr.attr,
        &sensor_dev_attr_temp2_crit.dev_attr.attr,
        &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
        &sensor_dev_attr_temp3_input.dev_attr.attr,
        &sensor_dev_attr_temp3_crit.dev_attr.attr,
        &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
        &sensor_dev_attr_temp1_emergency.dev_attr.attr,
        &sensor_dev_attr_temp2_emergency.dev_attr.attr,
        &sensor_dev_attr_temp3_emergency.dev_attr.attr,
        &sensor_dev_attr_temp1_label.dev_attr.attr,
        &sensor_dev_attr_temp2_label.dev_attr.attr,
        &sensor_dev_attr_temp3_label.dev_attr.attr,
        &sensor_dev_attr_pwm1.dev_attr.attr,
        &sensor_dev_attr_pwm1_enable.dev_attr.attr,
        &sensor_dev_attr_pwm1_min.dev_attr.attr,
        &sensor_dev_attr_pwm1_max.dev_attr.attr,
        &sensor_dev_attr_fan1_input.dev_attr.attr,
        &sensor_dev_attr_fan1_min.dev_attr.attr,
        &sensor_dev_attr_fan1_max.dev_attr.attr,
        &sensor_dev_attr_fan1_target.dev_attr.attr,
        &sensor_dev_attr_fan1_enable.dev_attr.attr,
        &sensor_dev_attr_in0_input.dev_attr.attr,
        &sensor_dev_attr_in0_label.dev_attr.attr,
        &sensor_dev_attr_in1_input.dev_attr.attr,
        &sensor_dev_attr_in1_label.dev_attr.attr,
        &sensor_dev_attr_power1_average.dev_attr.attr,
        &sensor_dev_attr_power1_cap_max.dev_attr.attr,
        &sensor_dev_attr_power1_cap_min.dev_attr.attr,
        &sensor_dev_attr_power1_cap.dev_attr.attr,
        &sensor_dev_attr_freq1_input.dev_attr.attr,
        &sensor_dev_attr_freq1_label.dev_attr.attr,
        &sensor_dev_attr_freq2_input.dev_attr.attr,
        &sensor_dev_attr_freq2_label.dev_attr.attr,
        NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
                                        struct attribute *attr, int index)
{
        struct device *dev = kobj_to_dev(kobj);
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        umode_t effective_mode = attr->mode;

        /* under multi-vf mode, the hwmon attributes are all not supported */
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        /* there is no fan under pp one vf mode */
        if (amdgpu_sriov_is_pp_one_vf(adev) &&
            (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
                return 0;

        /* Skip fan attributes if fan is not present */
        if (adev->pm.no_fan &&
            (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
                return 0;

        /* Skip fan attributes on APU */
        if ((adev->flags & AMD_IS_APU) &&
            (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
                return 0;

        /* Skip limit attributes if DPM is not enabled */
        if (!adev->pm.dpm_enabled &&
            (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
             attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
             attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
                return 0;

        if (!is_support_sw_smu(adev)) {
                /* mask fan attributes if we have no bindings for this asic to expose */
                if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
                     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
                    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
                     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
                        effective_mode &= ~S_IRUGO;

                if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
                     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
                    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
                     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
                        effective_mode &= ~S_IWUSR;
        }

        if (((adev->flags & AMD_IS_APU) ||
             adev->family == AMDGPU_FAMILY_SI ||        /* not implemented yet */
             adev->family == AMDGPU_FAMILY_KV) &&       /* not implemented yet */
            (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
             attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
             attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
             attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
                return 0;

        if (!is_support_sw_smu(adev)) {
                /* hide max/min values if we can't both query and manage the fan */
                if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
                     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
                    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
                     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
                    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
                     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
                        return 0;

                if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
                     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
                    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
                     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
                        return 0;
        }

        if ((adev->family == AMDGPU_FAMILY_SI ||        /* not implemented yet */
             adev->family == AMDGPU_FAMILY_KV) &&       /* not implemented yet */
            (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
             attr == &sensor_dev_attr_in0_label.dev_attr.attr))
                return 0;

        /* only APUs have vddnb */
        if (!(adev->flags & AMD_IS_APU) &&
            (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
             attr == &sensor_dev_attr_in1_label.dev_attr.attr))
                return 0;

        /* no mclk on APUs */
        if ((adev->flags & AMD_IS_APU) &&
            (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
             attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
                return 0;

        /* only SOC15 dGPUs support hotspot and mem temperatures */
        if (((adev->flags & AMD_IS_APU) ||
             adev->asic_type < CHIP_VEGA10) &&
            (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
             attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
             attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
             attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
             attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
             attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
             attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
             attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
             attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
             attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
             attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
                return 0;

        return effective_mode;
}

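/*
 * Note: the sysfs core calls hwmon_attributes_visible() once per entry
 * in hwmon_attributes when the group is registered. Returning 0 omits
 * the file entirely, while a nonzero return value overrides the mode
 * declared in SENSOR_DEVICE_ATTR() above.
 */
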
static const struct attribute_group hwmon_attrgroup = {
        .attrs = hwmon_attributes,
        .is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
        &hwmon_attrgroup,
        NULL
};

void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device,
                             pm.dpm.thermal.work);
        /* switch to the thermal state */
        enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
        int temp, size = sizeof(temp);

        if (!adev->pm.dpm_enabled)
                return;

        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
                                    (void *)&temp, &size)) {
                if (temp < adev->pm.dpm.thermal.min_temp)
                        /* switch back the user state */
                        dpm_state = adev->pm.dpm.user_state;
        } else {
                if (adev->pm.dpm.thermal.high_to_low)
                        /* switch back the user state */
                        dpm_state = adev->pm.dpm.user_state;
        }
        mutex_lock(&adev->pm.mutex);
        if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
                adev->pm.dpm.thermal_active = true;
        else
                adev->pm.dpm.thermal_active = false;
        adev->pm.dpm.state = dpm_state;
        mutex_unlock(&adev->pm.mutex);

        amdgpu_pm_compute_clocks(adev);
}

static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
                                                     enum amd_pm_state_type dpm_state)
{
        int i;
        struct amdgpu_ps *ps;
        u32 ui_class;
        bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
                true : false;

        /* check if the vblank period is too short to adjust the mclk */
        if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
                if (amdgpu_dpm_vblank_too_short(adev))
                        single_display = false;
        }

        /* certain older asics have a separate 3D performance state,
         * so try that first if the user selected performance
         */
        if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
                dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
        /* balanced states don't exist at the moment */
        if (dpm_state == POWER_STATE_TYPE_BALANCED)
                dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
        /* Pick the best power state based on current conditions */
        for (i = 0; i < adev->pm.dpm.num_ps; i++) {
                ps = &adev->pm.dpm.ps[i];
                ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
                switch (dpm_state) {
                /* user states */
                case POWER_STATE_TYPE_BATTERY:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                case POWER_STATE_TYPE_BALANCED:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                case POWER_STATE_TYPE_PERFORMANCE:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                /* internal states */
                case POWER_STATE_TYPE_INTERNAL_UVD:
                        if (adev->pm.dpm.uvd_ps)
                                return adev->pm.dpm.uvd_ps;
                        else
                                break;
                case POWER_STATE_TYPE_INTERNAL_UVD_SD:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_HD:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
                        if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_BOOT:
                        return adev->pm.dpm.boot_ps;
                case POWER_STATE_TYPE_INTERNAL_THERMAL:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_ACPI:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_ULV:
                        if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_3DPERF:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
                                return ps;
                        break;
                default:
                        break;
                }
        }
        /* use a fallback state if we didn't match */
        switch (dpm_state) {
        case POWER_STATE_TYPE_INTERNAL_UVD_SD:
                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
                goto restart_search;
        case POWER_STATE_TYPE_INTERNAL_UVD_HD:
        case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
        case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
                if (adev->pm.dpm.uvd_ps) {
                        return adev->pm.dpm.uvd_ps;
                } else {
                        dpm_state = POWER_STATE_TYPE_PERFORMANCE;
                        goto restart_search;
                }
        case POWER_STATE_TYPE_INTERNAL_THERMAL:
                dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
                goto restart_search;
        case POWER_STATE_TYPE_INTERNAL_ACPI:
                dpm_state = POWER_STATE_TYPE_BATTERY;
                goto restart_search;
        case POWER_STATE_TYPE_BATTERY:
        case POWER_STATE_TYPE_BALANCED:
        case POWER_STATE_TYPE_INTERNAL_3DPERF:
                dpm_state = POWER_STATE_TYPE_PERFORMANCE;
                goto restart_search;
        default:
                break;
        }

        return NULL;
}

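/*
 * A concrete trace of the fallback chain above (illustrative only): a
 * request for POWER_STATE_TYPE_INTERNAL_UVD_SD on a table with no SD
 * state retries the search as UVD_HD; if that also misses and no
 * dedicated uvd_ps exists, it retries once more as PERFORMANCE.
 */
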
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
        struct amdgpu_ps *ps;
        enum amd_pm_state_type dpm_state;
        int ret;
        bool equal = false;

        /* if dpm init failed */
        if (!adev->pm.dpm_enabled)
                return;

        if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
                /* add other state override checks here */
                if ((!adev->pm.dpm.thermal_active) &&
                    (!adev->pm.dpm.uvd_active))
                        adev->pm.dpm.state = adev->pm.dpm.user_state;
        }
        dpm_state = adev->pm.dpm.state;

        ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
        if (ps)
                adev->pm.dpm.requested_ps = ps;
        else
                return;

        if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
                printk("switching from power state:\n");
                amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
                printk("switching to power state:\n");
                amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
        }

        /* update whether vce is active */
        ps->vce_active = adev->pm.dpm.vce_active;
        if (adev->powerplay.pp_funcs->display_configuration_changed)
                amdgpu_dpm_display_configuration_changed(adev);

        ret = amdgpu_dpm_pre_set_power_state(adev);
        if (ret)
                return;

        if (adev->powerplay.pp_funcs->check_state_equal) {
                if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
                        equal = false;
        }

        if (equal)
                return;

        amdgpu_dpm_set_power_state(adev);
        amdgpu_dpm_post_set_power_state(adev);

        adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
        adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

        if (adev->powerplay.pp_funcs->force_performance_level) {
                if (adev->pm.dpm.thermal_active) {
                        enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
                        /* force low perf level for thermal */
                        amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
                        /* save the user's level */
                        adev->pm.dpm.forced_level = level;
                } else {
                        /* otherwise, user selected level */
                        amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
                }
        }
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
        if (ret)
                DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);

        /* enable/disable Low Memory PState for UVD (4k videos) */
        if (adev->asic_type == CHIP_STONEY &&
            adev->uvd.decode_image_width >= WIDTH_4K) {
                struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

                if (hwmgr && hwmgr->hwmgr_func &&
                    hwmgr->hwmgr_func->update_nbdpm_pstate)
                        hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
                                                               !enable,
                                                               true);
        }
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
        if (ret)
                DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
        int i;

        if (adev->powerplay.pp_funcs->print_power_state == NULL)
                return;

        for (i = 0; i < adev->pm.dpm.num_ps; i++)
                amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
        if (ret)
                DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
        int r;

        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
                r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
                if (r) {
                        pr_err("smu firmware loading failed\n");
                        return r;
                }
                *smu_version = adev->pm.fw_version;
        }
        return 0;
}

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret;

        if (adev->pm.sysfs_initialized)
                return 0;

        if (adev->pm.dpm_enabled == 0)
                return 0;

        adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
                                                                   "amdgpu", adev,
                                                                   hwmon_groups);
        if (IS_ERR(adev->pm.int_hwmon_dev)) {
                ret = PTR_ERR(adev->pm.int_hwmon_dev);
                dev_err(adev->dev,
                        "Unable to register hwmon device: %d\n", ret);
                return ret;
        }

        ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
        if (ret) {
                DRM_ERROR("failed to create device file for dpm state\n");
                return ret;
        }
        ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
        if (ret) {
                DRM_ERROR("failed to create device file for dpm state\n");
                return ret;
        }

        ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
        if (ret) {
                DRM_ERROR("failed to create device file pp_num_states\n");
                return ret;
        }
        ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
        if (ret) {
                DRM_ERROR("failed to create device file pp_cur_state\n");
                return ret;
        }
        ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
        if (ret) {
                DRM_ERROR("failed to create device file pp_force_state\n");
                return ret;
        }
        ret = device_create_file(adev->dev, &dev_attr_pp_table);
        if (ret) {
                DRM_ERROR("failed to create device file pp_table\n");
                return ret;
        }

        ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
        if (ret) {
                DRM_ERROR("failed to create device file pp_dpm_sclk\n");
                return ret;
        }

        /* Arcturus does not support standalone mclk/socclk/fclk level setting */
        if (adev->asic_type == CHIP_ARCTURUS) {
                dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
                dev_attr_pp_dpm_mclk.store = NULL;

                dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
                dev_attr_pp_dpm_socclk.store = NULL;

                dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
                dev_attr_pp_dpm_fclk.store = NULL;
        }

        ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
        if (ret) {
                DRM_ERROR("failed to create device file pp_dpm_mclk\n");
                return ret;
        }
        if (adev->asic_type >= CHIP_VEGA10) {
                ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
                if (ret) {
                        DRM_ERROR("failed to create device file pp_dpm_socclk\n");
                        return ret;
                }
                if (adev->asic_type != CHIP_ARCTURUS) {
                        ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
                        if (ret) {
                                DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
                                return ret;
                        }
                }
        }
        if (adev->asic_type >= CHIP_VEGA20) {
                ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
                if (ret) {
                        DRM_ERROR("failed to create device file pp_dpm_fclk\n");
                        return ret;
                }
        }

        if (adev->asic_type != CHIP_ARCTURUS) {
                ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
                if (ret) {
                        DRM_ERROR("failed to create device file pp_dpm_pcie\n");
                        return ret;
                }
        }
        ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
        if (ret) {
                DRM_ERROR("failed to create device file pp_sclk_od\n");
                return ret;
        }
        ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
        if (ret) {
                DRM_ERROR("failed to create device file pp_mclk_od\n");
                return ret;
        }
        ret = device_create_file(adev->dev,
                                 &dev_attr_pp_power_profile_mode);
        if (ret) {
                DRM_ERROR("failed to create device file pp_power_profile_mode\n");
                return ret;
        }
        if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
            (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
                ret = device_create_file(adev->dev,
                                         &dev_attr_pp_od_clk_voltage);
                if (ret) {
                        DRM_ERROR("failed to create device file pp_od_clk_voltage\n");
                        return ret;
                }
        }
        ret = device_create_file(adev->dev,
                                 &dev_attr_gpu_busy_percent);
        if (ret) {
                DRM_ERROR("failed to create device file gpu_busy_level\n");
                return ret;
        }
        /* APU does not have its own dedicated memory */
        if (!(adev->flags & AMD_IS_APU) &&
            (adev->asic_type != CHIP_VEGA10)) {
                ret = device_create_file(adev->dev,
                                         &dev_attr_mem_busy_percent);
                if (ret) {
                        DRM_ERROR("failed to create device file mem_busy_percent\n");
                        return ret;
                }
        }
        /* PCIe Perf counters won't work on APU nodes */
        if (!(adev->flags & AMD_IS_APU)) {
                ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
                if (ret) {
                        DRM_ERROR("failed to create device file pcie_bw\n");
                        return ret;
                }
        }
        if (adev->unique_id)
                ret = device_create_file(adev->dev, &dev_attr_unique_id);
        if (ret) {
                DRM_ERROR("failed to create device file unique_id\n");
                return ret;
        }
        ret = amdgpu_debugfs_pm_init(adev);
        if (ret) {
                DRM_ERROR("Failed to register debugfs file for dpm!\n");
                return ret;
        }

        if ((adev->asic_type >= CHIP_VEGA10) &&
            !(adev->flags & AMD_IS_APU)) {
                ret = device_create_file(adev->dev,
                                         &dev_attr_pp_features);
                if (ret) {
                        DRM_ERROR("failed to create device file pp_features\n");
                        return ret;
                }
        }

        adev->pm.sysfs_initialized = true;

        return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        if (adev->pm.dpm_enabled == 0)
                return;

        if (adev->pm.int_hwmon_dev)
                hwmon_device_unregister(adev->pm.int_hwmon_dev);
        device_remove_file(adev->dev, &dev_attr_power_dpm_state);
        device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);

        device_remove_file(adev->dev, &dev_attr_pp_num_states);
        device_remove_file(adev->dev, &dev_attr_pp_cur_state);
        device_remove_file(adev->dev, &dev_attr_pp_force_state);
        device_remove_file(adev->dev, &dev_attr_pp_table);

        device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
        device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
        if (adev->asic_type >= CHIP_VEGA10) {
                device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
                if (adev->asic_type != CHIP_ARCTURUS)
                        device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
        }
        if (adev->asic_type != CHIP_ARCTURUS)
                device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
        if (adev->asic_type >= CHIP_VEGA20)
                device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
        device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
        device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
        device_remove_file(adev->dev,
                           &dev_attr_pp_power_profile_mode);
        if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
            (!is_support_sw_smu(adev) && hwmgr->od_enabled))
                device_remove_file(adev->dev,
                                   &dev_attr_pp_od_clk_voltage);
        device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
        if (!(adev->flags & AMD_IS_APU) &&
            (adev->asic_type != CHIP_VEGA10))
                device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
        if (!(adev->flags & AMD_IS_APU))
                device_remove_file(adev->dev, &dev_attr_pcie_bw);
        if (adev->unique_id)
                device_remove_file(adev->dev, &dev_attr_unique_id);
        if ((adev->asic_type >= CHIP_VEGA10) &&
            !(adev->flags & AMD_IS_APU))
                device_remove_file(adev->dev, &dev_attr_pp_features);
}

void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
        int i = 0;

        if (!adev->pm.dpm_enabled)
                return;

        if (adev->mode_info.num_crtc)
                amdgpu_display_bandwidth_update(adev);

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (ring && ring->sched.ready)
                        amdgpu_fence_wait_empty(ring);
        }

        if (is_support_sw_smu(adev)) {
                struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
                smu_handle_task(&adev->smu,
                                smu_dpm->dpm_level,
                                AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
                                true);
        } else {
                if (adev->powerplay.pp_funcs->dispatch_tasks) {
                        if (!amdgpu_device_has_dc_support(adev)) {
                                mutex_lock(&adev->pm.mutex);
                                amdgpu_dpm_get_active_displays(adev);
                                adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
                                adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
                                adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
                                /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
                                if (adev->pm.pm_display_cfg.vrefresh > 120)
                                        adev->pm.pm_display_cfg.min_vblank_time = 0;
                                if (adev->powerplay.pp_funcs->display_configuration_change)
                                        adev->powerplay.pp_funcs->display_configuration_change(
                                                                adev->powerplay.pp_handle,
                                                                &adev->pm.pm_display_cfg);
                                mutex_unlock(&adev->pm.mutex);
                        }
                        amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
                } else {
                        mutex_lock(&adev->pm.mutex);
                        amdgpu_dpm_get_active_displays(adev);
                        amdgpu_dpm_change_power_state_locked(adev);
                        mutex_unlock(&adev->pm.mutex);
                }
        }
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
        uint32_t value;
        uint64_t value64;
        uint32_t query = 0;
        int size;

        /* GPU Clocks */
        size = sizeof(value);
        seq_printf(m, "GFX Clocks and Power:\n");
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
                seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
                seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
                seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
                seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
                seq_printf(m, "\t%u mV (VDDGFX)\n", value);
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
                seq_printf(m, "\t%u mV (VDDNB)\n", value);
        size = sizeof(uint32_t);
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
                seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
        size = sizeof(value);
        seq_printf(m, "\n");

        /* GPU Temp */
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
                seq_printf(m, "GPU Temperature: %u C\n", value/1000);

        /* GPU Load */
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
                seq_printf(m, "GPU Load: %u %%\n", value);
        /* MEM Load */
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
                seq_printf(m, "MEM Load: %u %%\n", value);

        seq_printf(m, "\n");

        /* SMC feature mask */
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
                seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);

        if (adev->asic_type > CHIP_VEGA20) {
                /* VCN clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
                        if (!value) {
                                seq_printf(m, "VCN: Disabled\n");
                        } else {
                                seq_printf(m, "VCN: Enabled\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
                        }
                }
                seq_printf(m, "\n");
        } else {
                /* UVD clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
                        if (!value) {
                                seq_printf(m, "UVD: Disabled\n");
                        } else {
                                seq_printf(m, "UVD: Enabled\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
                        }
                }
                seq_printf(m, "\n");

                /* VCE clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
                        if (!value) {
                                seq_printf(m, "VCE: Disabled\n");
                        } else {
                                seq_printf(m, "VCE: Enabled\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
                        }
                }
        }

        return 0;
}

static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
{
        int i;

        for (i = 0; clocks[i].flag; i++)
                seq_printf(m, "\t%s: %s\n", clocks[i].name,
                           (flags & clocks[i].flag) ? "On" : "Off");
}

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        u32 flags = 0;
        int r;

        r = pm_runtime_get_sync(dev->dev);
        if (r < 0)
                return r;

        amdgpu_device_ip_get_clockgating_state(adev, &flags);
        seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
        amdgpu_parse_cg_state(m, flags);
        seq_printf(m, "\n");

        if (!adev->pm.dpm_enabled) {
                seq_printf(m, "dpm not enabled\n");
                pm_runtime_mark_last_busy(dev->dev);
                pm_runtime_put_autosuspend(dev->dev);
                return 0;
        }

        if (!is_support_sw_smu(adev) &&
            adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
                mutex_lock(&adev->pm.mutex);
                if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
                        adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
                else
                        seq_printf(m, "Debugfs support not implemented for this asic\n");
                mutex_unlock(&adev->pm.mutex);
                r = 0;
        } else {
                r = amdgpu_debugfs_pm_info_pp(m, adev);
        }

        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        return r;
}

static const struct drm_info_list amdgpu_pm_info_list[] = {
        {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
        return 0;
#endif
}