 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include "drmP.h"
#include "radeon.h"
#include <linux/acpi.h>
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200
static const char *radeon_pm_state_type_name[5] = {
	"Default",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
#define ACPI_AC_CLASS "ac_adapter"
int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}
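
/*
 * ACPI notifier: when the AC adapter state changes and the "auto"
 * profile is selected, re-evaluate the profile so the driver can move
 * between the high (AC) and mid (battery) clock tables.
 */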
static int radeon_acpi_event(struct notifier_block *nb,
			     unsigned long val,
			     void *data)
{
	struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;

	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
		if (power_supply_is_system_supplied() > 0)
			DRM_DEBUG_DRIVER("pm: AC\n");
		else
			DRM_DEBUG_DRIVER("pm: DC\n");

		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			if (rdev->pm.profile == PM_PROFILE_AUTO) {
				mutex_lock(&rdev->pm.mutex);
				radeon_pm_update_profile(rdev);
				radeon_pm_set_clocks(rdev);
				mutex_unlock(&rdev->pm.mutex);
			}
		}
	}

	return NOTIFY_OK;
}
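
/*
 * Map the user-visible profile (default/auto/low/mid/high) to an index
 * into rdev->pm.profiles[].  The _SH/_MH variants distinguish a single
 * active head from multiple active heads (active_crtc_count > 1); "auto"
 * additionally picks high vs. mid depending on AC vs. battery power.
 */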
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}
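
/*
 * Tear down CPU-side mappings of VRAM buffer objects before a reclock.
 * The assumption here is that CPU access to VRAM while the memory clock
 * is changing is not reliable; unmapping forces accessors to fault back
 * in after the switch has completed.
 */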
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}
}
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}
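
/*
 * Program the requested power state: clamp the requested engine/memory
 * clocks to the defaults, apply voltage and PCIe lane changes on the
 * appropriate side of the clock change (raise voltage before raising
 * clocks, lower it after lowering clocks), and only reclock while the
 * GUI is idle (and, for dynpm, inside vblank).
 */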
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].mclk;
		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
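
/*
 * radeon_pm_set_clocks() quiesces the GPU before switching states:
 * take the ring/vram locks, wait for the GPU to go idle (GUI idle
 * interrupt on R600+, a fence on older parts), unmap VRAM BOs, hold
 * vblank references on the active CRTCs, switch the state, then update
 * the display watermarks for the new clocks.
 */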
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	mutex_lock(&rdev->ddev->struct_mutex);
	mutex_lock(&rdev->vram_mutex);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (rdev->ring[i].ring_obj)
			mutex_lock(&rdev->ring[i].mutex);
	}

	/* gui idle int has issues on older chips it seems */
	if (rdev->family >= CHIP_R600) {
		if (rdev->irq.installed) {
			/* wait for GPU idle */
			rdev->pm.gui_idle = false;
			rdev->irq.gui_idle = true;
			radeon_irq_set(rdev);
			wait_event_interruptible_timeout(
				rdev->irq.idle_queue, rdev->pm.gui_idle,
				msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
			rdev->irq.gui_idle = false;
			radeon_irq_set(rdev);
		}
	} else {
		struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
		if (ring->ready) {
			struct radeon_fence *fence;
			radeon_ring_alloc(rdev, ring, 64);
			radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
			radeon_fence_emit(rdev, fence);
			radeon_ring_commit(rdev, ring);
			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);
		}
	}
	radeon_unmap_vram_bos(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (rdev->ring[i].ring_obj)
			mutex_unlock(&rdev->ring[i].mutex);
	}
	mutex_unlock(&rdev->vram_mutex);
	mutex_unlock(&rdev->ddev->struct_mutex);
}
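
/*
 * Debug helper: dump the parsed power state table.  Clock values are
 * stored in 10 kHz units, hence the "* 10" when printing them in kHz.
 */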
static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
				 radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage,
						 clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
		}
	}
}
static ssize_t radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int cp = rdev->pm.profile;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(cp == PM_PROFILE_AUTO) ? "auto" :
			(cp == PM_PROFILE_LOW) ? "low" :
			(cp == PM_PROFILE_MID) ? "mid" :
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
}
static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			count = -EINVAL;
			goto fail;
		}
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else
		count = -EINVAL;

fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}
static ssize_t radeon_get_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int pm = rdev->pm.pm_method;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
}
static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		mutex_unlock(&rdev->pm.mutex);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		mutex_unlock(&rdev->pm.mutex);
		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	} else {
		count = -EINVAL;
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
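
/*
 * hwmon interface for the internal thermal sensor.  temp1_input reports
 * the value returned by the per-family get_temp() helper; by hwmon
 * convention this is expected to be in millidegrees Celsius, though the
 * exact scaling is up to those helpers.
 */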
static ssize_t radeon_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int temp;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
		temp = rv6xx_get_temp(rdev);
		break;
	case THERMAL_TYPE_RV770:
		temp = rv770_get_temp(rdev);
		break;
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
		temp = evergreen_get_temp(rdev);
		break;
	case THERMAL_TYPE_SUMO:
		temp = sumo_get_temp(rdev);
		break;
	default:
		temp = 0;
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t radeon_hwmon_show_name(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "radeon\n");
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_name.dev_attr.attr,
	NULL
};

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
};
static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	rdev->pm.int_hwmon_dev = NULL;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
		rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
			err = PTR_ERR(rdev->pm.int_hwmon_dev);
			dev_err(rdev->dev,
				"Unable to register hwmon device: %d\n", err);
			break;
		}
		dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
		err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
					 &hwmon_attrgroup);
		if (err) {
			dev_err(rdev->dev,
				"Unable to create hwmon sysfs file: %d\n", err);
			hwmon_device_unregister(rdev->dev);
		}
		break;
	default:
		break;
	}

	return err;
}
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
	if (rdev->pm.int_hwmon_dev) {
		sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
	}
}
void radeon_pm_suspend(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
	}
	mutex_unlock(&rdev->pm.mutex);

	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}
void radeon_pm_resume(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
	rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}
int radeon_pm_init(struct radeon_device *rdev)
{
	int ret;

	/* default to profile method */
	rdev->pm.pm_method = PM_METHOD_PROFILE;
	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

	if (rdev->pm.num_power_states > 1) {
		/* where's the best place to put these? */
		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
		if (ret)
			DRM_ERROR("failed to create device file for power profile\n");
		ret = device_create_file(rdev->dev, &dev_attr_power_method);
		if (ret)
			DRM_ERROR("failed to create device file for power method\n");

		rdev->acpi_nb.notifier_call = radeon_acpi_event;
		register_acpi_notifier(&rdev->acpi_nb);

		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}
void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);

		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);

		unregister_acpi_notifier(&rdev->acpi_nb);
	}

	if (rdev->pm.power_state)
		kfree(rdev->pm.power_state);

	radeon_hwmon_fini(rdev);
}
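
/*
 * Re-evaluate clocks for the current display configuration.  This is
 * typically called from the modeset/dpms paths: it recounts the active
 * CRTCs and then either re-applies the profile or adjusts the dynpm
 * state machine (pause with >1 head, run with 1 head, drop to minimum
 * clocks with none).
 */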
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	list_for_each_entry(crtc,
		&ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			rdev->pm.active_crtc_count++;
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
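
/*
 * Reclocking is only considered safe while scanout is in vblank;
 * radeon_pm_in_vbl() returns true only if every active CRTC is
 * currently in its vblank period.
 */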
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active crtc's. All crtc's must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
				in_vbl = false;
		}
	}

	return in_vbl;
}
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
				 finish ? "exit" : "entry");
	return in_vbl;
}
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	int resched;
	rdev = container_of(work, struct radeon_device,
				pm.dynpm_idle_work.work);

	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		int not_processed = 0;
		int i;

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			not_processed += radeon_fence_count_emitted(rdev, i);
			if (not_processed >= 3)
				break;
		}

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_upclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_UPCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_downclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_DOWNCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* Note, radeon_pm_set_clocks is called with static_switch set
		 * to false since we want to wait for vbl to avoid flicker.
		 */
		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
		    jiffies > rdev->pm.dynpm_action_timeout) {
			radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);
		}

		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->pm.current_vddc)
		seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
	if (rdev->asic->get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}