/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

/* DRM core, radeon device structures and the AVIVO register offsets used below */
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
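
/*
 * Dynamic PM in this file is a small state machine: rdev->pm.state is one of
 * DISABLED, MINIMUM, PAUSED or ACTIVE, and a planned action (upclock,
 * downclock, minimum, none) is resolved and applied under the PM mutex,
 * synchronized to the vertical blank so reclocking does not tear the scanout.
 */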

static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
static void radeon_pm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);

/* human-readable names, indexed by the PM_STATE_* values of rdev->pm.state */
static const char *pm_state_names[4] = {
	"None",
	"Minimum",
	"Paused",
	"Active",
};

/* human-readable names, indexed by the POWER_STATE_TYPE_* values */
static const char *pm_state_types[5] = {
	"Default",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
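
/*
 * Dump every power state parsed from the BIOS tables: its type, whether it
 * is the default state, its PCIE lane count (on discrete PCIE parts) and
 * each of its clock modes. Clocks are stored in units of 10 kHz, hence the
 * multiplication by 10 when printing.
 */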
static void radeon_print_power_mode_info(struct radeon_device *rdev)
{
	int i, j;
	bool is_default;

	DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
			is_default = true;
		else
			is_default = false;
		DRM_INFO("State %d %s %s\n", i,
			 pm_state_types[rdev->pm.power_state[i].type],
			 is_default ? "(default)" : "");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_INFO("\t%d PCIE Lanes\n",
				 rdev->pm.power_state[i].non_clock_info.pcie_lanes);
		DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
		for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
			if (rdev->flags & RADEON_IS_IGP)
				/* IGPs have no dedicated memory clock */
				DRM_INFO("\t\t%d engine: %d\n", j,
					 rdev->pm.power_state[i].clock_info[j].sclk * 10);
			else
				DRM_INFO("\t\t%d engine/memory: %d/%d\n", j,
					 rdev->pm.power_state[i].clock_info[j].sclk * 10,
					 rdev->pm.power_state[i].clock_info[j].mclk * 10);
		}
	}
}
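
/*
 * Map a requested state type to the best available power state. Mobility
 * parts may substitute BATTERY for POWERSAVE (and vice versa) when the
 * exact type is missing; non-mobility parts fall back to PERFORMANCE, and
 * the default state is the last resort.
 */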
static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
							    enum radeon_pm_state_type type)
{
	int i, j;
	int wanted_count = 0;
	enum radeon_pm_state_type wanted_types[2];

	switch (type) {
	case POWER_STATE_TYPE_DEFAULT:
	default:
		return rdev->pm.default_power_state;
	case POWER_STATE_TYPE_POWERSAVE:
		if (rdev->flags & RADEON_IS_MOBILITY) {
			wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
			wanted_types[1] = POWER_STATE_TYPE_BATTERY;
			wanted_count = 2;
		} else {
			wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
			wanted_count = 1;
		}
		break;
	case POWER_STATE_TYPE_BATTERY:
		if (rdev->flags & RADEON_IS_MOBILITY) {
			wanted_types[0] = POWER_STATE_TYPE_BATTERY;
			wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
			wanted_count = 2;
		} else {
			wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
			wanted_count = 1;
		}
		break;
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_PERFORMANCE:
		wanted_types[0] = type;
		wanted_count = 1;
		break;
	}

	for (i = 0; i < wanted_count; i++) {
		for (j = 0; j < rdev->pm.num_power_states; j++) {
			if (rdev->pm.power_state[j].type == wanted_types[i])
				return &rdev->pm.power_state[j];
		}
	}

	return rdev->pm.default_power_state;
}
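
/*
 * Pick a clock mode within a power state: LOW and HIGH are the first and
 * last entries of clock_info[], MID is the second entry when the state has
 * at least three modes and degrades to LOW otherwise.
 */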
static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
							     struct radeon_power_state *power_state,
							     enum radeon_pm_clock_mode_type type)
{
	switch (type) {
	case POWER_MODE_TYPE_DEFAULT:
	default:
		return power_state->default_clock_mode;
	case POWER_MODE_TYPE_LOW:
		return &power_state->clock_info[0];
	case POWER_MODE_TYPE_MID:
		if (power_state->num_clock_modes > 2)
			return &power_state->clock_info[1];
		else
			return &power_state->clock_info[0];
	case POWER_MODE_TYPE_HIGH:
		return &power_state->clock_info[power_state->num_clock_modes - 1];
	}
}
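
/*
 * Translate a planned PM action into a requested power state and clock
 * mode: MINIMUM maps to the battery state at its lowest clocks, DOWNCLOCK
 * to the powersave state at mid clocks, and UPCLOCK to the default state
 * at its highest clocks.
 */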
static void radeon_get_power_state(struct radeon_device *rdev,
				   enum radeon_pm_action action)
{
	switch (action) {
	case PM_ACTION_MINIMUM:
		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
		rdev->pm.requested_clock_mode =
			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
		break;
	case PM_ACTION_DOWNCLOCK:
		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
		rdev->pm.requested_clock_mode =
			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
		break;
	case PM_ACTION_UPCLOCK:
		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
		rdev->pm.requested_clock_mode =
			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
		break;
	default:
		DRM_ERROR("Requested mode for undefined action\n");
		return;
	}
	DRM_INFO("Requested: e: %d m: %d p: %d\n",
		 rdev->pm.requested_clock_mode->sclk,
		 rdev->pm.requested_clock_mode->mclk,
		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
}
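
/*
 * Wait (with a timeout) until the vblank interrupt handler signals the
 * start of the next vertical blank, so that the following reclock lands
 * inside it. With no active CRTCs there is nothing to tear, so return
 * immediately.
 */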
static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}
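
/*
 * Program the requested state into the hardware: engine clock first, then
 * (if the ASIC supports it) the memory clock, each change synchronized to
 * the vertical blank and cross-checked by the in-vblank debug helper.
 */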
static void radeon_set_power_state(struct radeon_device *rdev)
{
	/* if *_clock_mode are the same, *_power_state are as well */
	if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
		return;

	DRM_INFO("Setting: e: %d m: %d p: %d\n",
		 rdev->pm.requested_clock_mode->sclk,
		 rdev->pm.requested_clock_mode->mclk,
		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);

	/* set engine clock */
	radeon_sync_with_vblank(rdev);
	radeon_pm_debug_check_in_vbl(rdev, false);
	radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
	radeon_pm_debug_check_in_vbl(rdev, true);

	/* set memory clock */
	if (rdev->asic->set_memory_clock) {
		radeon_sync_with_vblank(rdev);
		radeon_pm_debug_check_in_vbl(rdev, false);
		radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
		radeon_pm_debug_check_in_vbl(rdev, true);
	}

	rdev->pm.current_power_state = rdev->pm.requested_power_state;
	rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
}
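
/*
 * One-time PM setup: start disabled, parse the power state tables from the
 * ATOM or legacy (combios) BIOS, register the debugfs file and the idle
 * worker, and move to PAUSED when the radeon_dynpm module parameter enables
 * dynamic reclocking.
 */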
int radeon_pm_init(struct radeon_device *rdev)
{
	rdev->pm.state = PM_STATE_DISABLED;
	rdev->pm.planned_action = PM_ACTION_NONE;
	rdev->pm.downclocked = false;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_print_power_mode_info(rdev);
	}

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for PM!\n");
	}

	INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);

	if (radeon_dynpm != -1 && radeon_dynpm) {
		rdev->pm.state = PM_STATE_PAUSED;
		DRM_INFO("radeon: dynamic power management enabled\n");
	}

	DRM_INFO("radeon: power management initialized\n");

	return 0;
}
void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.i2c_bus)
		radeon_i2c_destroy(rdev->pm.i2c_bus);
}
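
/*
 * Recompute the PM state machine after a modeset or DPMS change. Count the
 * active CRTCs: more than one pauses dynamic reclocking (restoring full
 * clocks if needed), exactly one activates it, and zero drops the GPU to
 * its minimum state.
 */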
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_connector *connector;
	struct radeon_crtc *radeon_crtc;
	int count = 0;

	if (rdev->pm.state == PM_STATE_DISABLED)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	list_for_each_entry(connector,
			    &ddev->mode_config.connector_list, head) {
		if (connector->encoder &&
		    connector->encoder->crtc &&
		    connector->dpms != DRM_MODE_DPMS_OFF) {
			radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			++count;
		}
	}

	if (count > 1) {
		if (rdev->pm.state == PM_STATE_ACTIVE) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_PAUSED;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			if (rdev->pm.downclocked)
				radeon_pm_set_clocks(rdev);

			DRM_DEBUG("radeon: dynamic power management deactivated\n");
		}
	} else if (count == 1) {
		/* TODO: Increase clocks if needed for current mode */

		if (rdev->pm.state == PM_STATE_MINIMUM) {
			rdev->pm.state = PM_STATE_ACTIVE;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			radeon_pm_set_clocks(rdev);

			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
		} else if (rdev->pm.state == PM_STATE_PAUSED) {
			rdev->pm.state = PM_STATE_ACTIVE;
			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
			DRM_DEBUG("radeon: dynamic power management activated\n");
		}
	} else { /* count == 0 */
		if (rdev->pm.state != PM_STATE_MINIMUM) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_MINIMUM;
			rdev->pm.planned_action = PM_ACTION_MINIMUM;
			radeon_pm_set_clocks(rdev);
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
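
/*
 * Debug helper: verify that we really are inside the vertical blank while
 * reclocking. On AVIVO parts, bit 0 of D1CRTC_STATUS/D2CRTC_STATUS reports
 * the vblank state of each CRTC; log a warning at entry or exit of the
 * clock change if any active CRTC is outside its vblank.
 */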
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc1 = 0, stat_crtc2 = 0;
	bool in_vbl = true;

	if (ASIC_IS_AVIVO(rdev)) {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			stat_crtc1 = RREG32(D1CRTC_STATUS);
			if (!(stat_crtc1 & 1))
				in_vbl = false;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			stat_crtc2 = RREG32(D2CRTC_STATUS);
			if (!(stat_crtc2 & 1))
				in_vbl = false;
		}
	}
	if (!in_vbl)
		DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
			 stat_crtc2, finish ? "exit" : "entry");
	return in_vbl;
}
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
{
	/*radeon_fence_wait_last(rdev);*/
	switch (rdev->pm.planned_action) {
	case PM_ACTION_UPCLOCK:
		rdev->pm.downclocked = false;
		break;
	case PM_ACTION_DOWNCLOCK:
		rdev->pm.downclocked = true;
		break;
	case PM_ACTION_MINIMUM:
		break;
	case PM_ACTION_NONE:
		DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
		break;
	}

	radeon_set_power_state(rdev);
	rdev->pm.planned_action = PM_ACTION_NONE;
}
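
/*
 * Resolve the planned action into a concrete state, then perform the
 * reclock while holding the CP mutex. drm_vblank_get()/drm_vblank_put()
 * keep vblank interrupts enabled on every active CRTC for the duration,
 * so radeon_sync_with_vblank() has an interrupt to wait on.
 */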
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	radeon_get_power_state(rdev, rdev->pm.planned_action);
	mutex_lock(&rdev->cp.mutex);

	if (rdev->pm.active_crtcs & (1 << 0)) {
		rdev->pm.req_vblank |= (1 << 0);
		drm_vblank_get(rdev->ddev, 0);
	}
	if (rdev->pm.active_crtcs & (1 << 1)) {
		rdev->pm.req_vblank |= (1 << 1);
		drm_vblank_get(rdev->ddev, 1);
	}
	radeon_pm_set_clocks_locked(rdev);
	if (rdev->pm.req_vblank & (1 << 0)) {
		rdev->pm.req_vblank &= ~(1 << 0);
		drm_vblank_put(rdev->ddev, 0);
	}
	if (rdev->pm.req_vblank & (1 << 1)) {
		rdev->pm.req_vblank &= ~(1 << 1);
		drm_vblank_put(rdev->ddev, 1);
	}

	mutex_unlock(&rdev->cp.mutex);
}
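
/*
 * Periodic idle worker, requeued every RADEON_IDLE_LOOP_MS. It samples how
 * many emitted fences are still unprocessed as a crude load metric: three
 * or more pending fences schedule an upclock, zero pending fences schedule
 * a downclock, and an action is only applied once it has survived
 * RADEON_RECLOCK_DELAY_MS without being cancelled by the opposite trend.
 */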
static void radeon_pm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	rdev = container_of(work, struct radeon_device,
			    pm.idle_work.work);

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.state == PM_STATE_ACTIVE) {
		unsigned long irq_flags;
		int not_processed = 0;

		read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		if (!list_empty(&rdev->fence_drv.emited)) {
			struct list_head *ptr;
			list_for_each(ptr, &rdev->fence_drv.emited) {
				/* count up to 3, that's enough info */
				if (++not_processed >= 3)
					break;
			}
		}
		read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
				rdev->pm.planned_action = PM_ACTION_NONE;
			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
				   rdev->pm.downclocked) {
				rdev->pm.planned_action =
					PM_ACTION_UPCLOCK;
				rdev->pm.action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
				rdev->pm.planned_action = PM_ACTION_NONE;
			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
				   !rdev->pm.downclocked) {
				rdev->pm.planned_action =
					PM_ACTION_DOWNCLOCK;
				rdev->pm.action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		if (rdev->pm.planned_action != PM_ACTION_NONE &&
		    jiffies > rdev->pm.action_timeout) {
			radeon_pm_set_clocks(rdev);
		}
	}
	mutex_unlock(&rdev->pm.mutex);

	queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
			   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->asic->get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}