/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536

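/* Firmware images per ASIC family.  MODULE_FIRMWARE() records each blob
 * name in the module metadata so that userspace tooling (e.g. initramfs
 * generators) knows to ship the file alongside the module.
 */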
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);

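/* The decode below assumes ASIC_T is a signed 9-bit field: bit 8 carries
 * the sign and the low 8 bits the magnitude in degrees C, hence the
 * explicit sign fix-up before scaling to millidegrees.
 */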
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

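/* Dynamic PM state selection: choose the power state and clock mode the
 * next reclock should move to, given the planned action (minimum,
 * downclock, upclock, default).  IGP parts and the original R600 switch
 * between whole power states; everything else keeps one state and walks
 * its clock modes.
 */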
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

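/* Power profile setup for RS780/RS880.  Each profile entry records which
 * power state and clock mode to use with displays off (dpms_off) and on
 * (dpms_on); the layout depends on how many power states the BIOS exposes.
 */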
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

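/* Same idea for the rest of the r6xx family: populate the profile table,
 * preferring battery-type states on mobility parts and performance states
 * otherwise (see the radeon_pm_get_type_index() lookups below).
 */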
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

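/* Apply the non-clock side of a power state change; on r6xx that is just
 * the VDDC voltage, programmed through the ATOM tables.
 */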
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

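/* The polarity bit selects which level raises the HPD interrupt; set it to
 * the opposite of the current sense so the next plug/unplug event fires.
 */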
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

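/* HPD pad setup.  The 0x9c4/0xfa values below program the connection and
 * RX interrupt timers (presumably debounce intervals); DCE 3.2 parts
 * additionally need DC_HPDx_EN set in the same register.
 */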
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS avoid breaking the
			 * aux dp channel on imac and help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

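/* GART TLB flush.  The HDP cache is flushed first so that page-table
 * updates written by the CPU reach VRAM before the invalidate request is
 * posted to VM context 0.
 */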
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

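/* Bring the GART up: program the VM L2 and L1 TLB controls, point context
 * 0 at the page table in VRAM, and route page faults to the dummy page so
 * stray translations cannot scribble over arbitrary memory.
 */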
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

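/* Reprogram the memory-controller apertures.  The MC is stopped
 * (rv515_mc_stop/rv515_mc_resume) and waited idle while the FB and AGP
 * windows change, since in-flight requests could otherwise land in the
 * wrong window.
 */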
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Tries to place VRAM at the same offset as in the CPU (PCI) address
 * space, as some GPUs appear to have issues when VRAM is reprogrammed
 * to a different address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture;
 * they need to be contiguous from the GPU's point of view so that we can
 * program the GPU to catch accesses outside of them.
 *
 * This function never fails; in the worst case it limits VRAM or GTT.
 *
 * Note: GTT start, end, and size should be initialized before calling
 * this function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}

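/* The VRAM scratch object is a single pinned GPU page; it provides a safe
 * default address (used for MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR above) that
 * stray MC accesses can be pointed at.
 */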
int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
				(void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}

void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset.  It's up to the caller to determine if the GPU needs one.  We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
		RREG32(CP_STAT));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
		RREG32(CP_STAT));
	rv515_mc_resume(rdev, &save);
	return 0;
}

bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}

int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

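/* Build the render-backend map: spread the rendering pipes over the
 * enabled backends, packing each backend index into a fixed-width field
 * (2 bits on r6xx/r7xx, 4 bits from evergreen on).
 */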
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
			      u32 tiling_pipe_num,
			      u32 max_rb_num,
			      u32 total_max_rb_num,
			      u32 disabled_rb_mask)
{
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
	u32 pipe_rb_ratio, pipe_rb_remain;
	u32 data = 0, mask = 1 << (max_rb_num - 1);
	unsigned i, j;

	/* mask out the RBs that don't exist on that asic */
	disabled_rb_mask |= (0xff << max_rb_num) & 0xff;

	rendering_pipe_num = 1 << tiling_pipe_num;
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
	BUG_ON(rendering_pipe_num < req_rb_num);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	if (rdev->family <= CHIP_RV740) {
		/* r6xx/r7xx */
		rb_num_width = 2;
	} else {
		/* eg+ */
		rb_num_width = 4;
	}

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}

	return data;
}

int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

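/* Core GPU setup: per-family limits, tiling configuration derived from
 * RAMCFG, backend remapping, and the SQ resource split between the
 * PS/VS/GS/ES shader stages.  The SQ values are defaults that the 2D/3D
 * drivers are expected to adjust (see the comments below).
 */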
1436 void r600_gpu_init(struct radeon_device
*rdev
)
1440 u32 cc_rb_backend_disable
;
1441 u32 cc_gc_shader_pipe_config
;
1445 u32 sq_gpr_resource_mgmt_1
= 0;
1446 u32 sq_gpr_resource_mgmt_2
= 0;
1447 u32 sq_thread_resource_mgmt
= 0;
1448 u32 sq_stack_resource_mgmt_1
= 0;
1449 u32 sq_stack_resource_mgmt_2
= 0;
1450 u32 disabled_rb_mask
;
1452 rdev
->config
.r600
.tiling_group_size
= 256;
1453 switch (rdev
->family
) {
1455 rdev
->config
.r600
.max_pipes
= 4;
1456 rdev
->config
.r600
.max_tile_pipes
= 8;
1457 rdev
->config
.r600
.max_simds
= 4;
1458 rdev
->config
.r600
.max_backends
= 4;
1459 rdev
->config
.r600
.max_gprs
= 256;
1460 rdev
->config
.r600
.max_threads
= 192;
1461 rdev
->config
.r600
.max_stack_entries
= 256;
1462 rdev
->config
.r600
.max_hw_contexts
= 8;
1463 rdev
->config
.r600
.max_gs_threads
= 16;
1464 rdev
->config
.r600
.sx_max_export_size
= 128;
1465 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1466 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1467 rdev
->config
.r600
.sq_num_cf_insts
= 2;
1471 rdev
->config
.r600
.max_pipes
= 2;
1472 rdev
->config
.r600
.max_tile_pipes
= 2;
1473 rdev
->config
.r600
.max_simds
= 3;
1474 rdev
->config
.r600
.max_backends
= 1;
1475 rdev
->config
.r600
.max_gprs
= 128;
1476 rdev
->config
.r600
.max_threads
= 192;
1477 rdev
->config
.r600
.max_stack_entries
= 128;
1478 rdev
->config
.r600
.max_hw_contexts
= 8;
1479 rdev
->config
.r600
.max_gs_threads
= 4;
1480 rdev
->config
.r600
.sx_max_export_size
= 128;
1481 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1482 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1483 rdev
->config
.r600
.sq_num_cf_insts
= 2;
1489 rdev
->config
.r600
.max_pipes
= 1;
1490 rdev
->config
.r600
.max_tile_pipes
= 1;
1491 rdev
->config
.r600
.max_simds
= 2;
1492 rdev
->config
.r600
.max_backends
= 1;
1493 rdev
->config
.r600
.max_gprs
= 128;
1494 rdev
->config
.r600
.max_threads
= 192;
1495 rdev
->config
.r600
.max_stack_entries
= 128;
1496 rdev
->config
.r600
.max_hw_contexts
= 4;
1497 rdev
->config
.r600
.max_gs_threads
= 4;
1498 rdev
->config
.r600
.sx_max_export_size
= 128;
1499 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1500 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1501 rdev
->config
.r600
.sq_num_cf_insts
= 1;
1504 rdev
->config
.r600
.max_pipes
= 4;
1505 rdev
->config
.r600
.max_tile_pipes
= 4;
1506 rdev
->config
.r600
.max_simds
= 4;
1507 rdev
->config
.r600
.max_backends
= 4;
1508 rdev
->config
.r600
.max_gprs
= 192;
1509 rdev
->config
.r600
.max_threads
= 192;
1510 rdev
->config
.r600
.max_stack_entries
= 256;
1511 rdev
->config
.r600
.max_hw_contexts
= 8;
1512 rdev
->config
.r600
.max_gs_threads
= 16;
1513 rdev
->config
.r600
.sx_max_export_size
= 128;
1514 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1515 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1516 rdev
->config
.r600
.sq_num_cf_insts
= 2;
1522 /* Initialize HDP */
1523 for (i
= 0, j
= 0; i
< 32; i
++, j
+= 0x18) {
1524 WREG32((0x2c14 + j
), 0x00000000);
1525 WREG32((0x2c18 + j
), 0x00000000);
1526 WREG32((0x2c1c + j
), 0x00000000);
1527 WREG32((0x2c20 + j
), 0x00000000);
1528 WREG32((0x2c24 + j
), 0x00000000);
1531 WREG32(GRBM_CNTL
, GRBM_READ_TIMEOUT(0xff));
1535 ramcfg
= RREG32(RAMCFG
);
1536 switch (rdev
->config
.r600
.max_tile_pipes
) {
1538 tiling_config
|= PIPE_TILING(0);
1541 tiling_config
|= PIPE_TILING(1);
1544 tiling_config
|= PIPE_TILING(2);
1547 tiling_config
|= PIPE_TILING(3);
1552 rdev
->config
.r600
.tiling_npipes
= rdev
->config
.r600
.max_tile_pipes
;
1553 rdev
->config
.r600
.tiling_nbanks
= 4 << ((ramcfg
& NOOFBANK_MASK
) >> NOOFBANK_SHIFT
);
1554 tiling_config
|= BANK_TILING((ramcfg
& NOOFBANK_MASK
) >> NOOFBANK_SHIFT
);
1555 tiling_config
|= GROUP_SIZE((ramcfg
& BURSTLENGTH_MASK
) >> BURSTLENGTH_SHIFT
);
1557 tmp
= (ramcfg
& NOOFROWS_MASK
) >> NOOFROWS_SHIFT
;
1559 tiling_config
|= ROW_TILING(3);
1560 tiling_config
|= SAMPLE_SPLIT(3);
1562 tiling_config
|= ROW_TILING(tmp
);
1563 tiling_config
|= SAMPLE_SPLIT(tmp
);
1565 tiling_config
|= BANK_SWAPS(1);
1567 cc_rb_backend_disable
= RREG32(CC_RB_BACKEND_DISABLE
) & 0x00ff0000;
1568 tmp
= R6XX_MAX_BACKENDS
-
1569 r600_count_pipe_bits((cc_rb_backend_disable
>> 16) & R6XX_MAX_BACKENDS_MASK
);
1570 if (tmp
< rdev
->config
.r600
.max_backends
) {
1571 rdev
->config
.r600
.max_backends
= tmp
;
1574 cc_gc_shader_pipe_config
= RREG32(CC_GC_SHADER_PIPE_CONFIG
) & 0x00ffff00;
1575 tmp
= R6XX_MAX_PIPES
-
1576 r600_count_pipe_bits((cc_gc_shader_pipe_config
>> 8) & R6XX_MAX_PIPES_MASK
);
1577 if (tmp
< rdev
->config
.r600
.max_pipes
) {
1578 rdev
->config
.r600
.max_pipes
= tmp
;
1580 tmp
= R6XX_MAX_SIMDS
-
1581 r600_count_pipe_bits((cc_gc_shader_pipe_config
>> 16) & R6XX_MAX_SIMDS_MASK
);
1582 if (tmp
< rdev
->config
.r600
.max_simds
) {
1583 rdev
->config
.r600
.max_simds
= tmp
;
1586 disabled_rb_mask
= (RREG32(CC_RB_BACKEND_DISABLE
) >> 16) & R6XX_MAX_BACKENDS_MASK
;
1587 tmp
= (tiling_config
& PIPE_TILING__MASK
) >> PIPE_TILING__SHIFT
;
1588 tmp
= r6xx_remap_render_backend(rdev
, tmp
, rdev
->config
.r600
.max_backends
,
1589 R6XX_MAX_BACKENDS
, disabled_rb_mask
);
1590 tiling_config
|= tmp
<< 16;
1591 rdev
->config
.r600
.backend_map
= tmp
;
1593 rdev
->config
.r600
.tile_config
= tiling_config
;
1594 WREG32(GB_TILING_CONFIG
, tiling_config
);
1595 WREG32(DCP_TILING_CONFIG
, tiling_config
& 0xffff);
1596 WREG32(HDP_TILING_CONFIG
, tiling_config
& 0xffff);
1598 tmp
= R6XX_MAX_PIPES
- r600_count_pipe_bits((cc_gc_shader_pipe_config
& INACTIVE_QD_PIPES_MASK
) >> 8);
1599 WREG32(VGT_OUT_DEALLOC_CNTL
, (tmp
* 4) & DEALLOC_DIST_MASK
);
1600 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL
, ((tmp
* 4) - 2) & VTX_REUSE_DEPTH_MASK
);
1602 /* Setup some CP states */
1603 WREG32(CP_QUEUE_THRESHOLDS
, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1604 WREG32(CP_MEQ_THRESHOLDS
, (MEQ_END(0x40) | ROQ_END(0x40)));
1606 WREG32(TA_CNTL_AUX
, (DISABLE_CUBE_ANISO
| SYNC_GRADIENT
|
1607 SYNC_WALKER
| SYNC_ALIGNER
));
1608 /* Setup various GPU states */
1609 if (rdev
->family
== CHIP_RV670
)
1610 WREG32(ARB_GDEC_RD_CNTL
, 0x00000021);
1612 tmp
= RREG32(SX_DEBUG_1
);
1613 tmp
|= SMX_EVENT_RELEASE
;
1614 if ((rdev
->family
> CHIP_R600
))
1615 tmp
|= ENABLE_NEW_SMX_ADDRESS
;
1616 WREG32(SX_DEBUG_1
, tmp
);
1618 if (((rdev
->family
) == CHIP_R600
) ||
1619 ((rdev
->family
) == CHIP_RV630
) ||
1620 ((rdev
->family
) == CHIP_RV610
) ||
1621 ((rdev
->family
) == CHIP_RV620
) ||
1622 ((rdev
->family
) == CHIP_RS780
) ||
1623 ((rdev
->family
) == CHIP_RS880
)) {
1624 WREG32(DB_DEBUG
, PREZ_MUST_WAIT_FOR_POSTZ_DONE
);
1626 WREG32(DB_DEBUG
, 0);
1628 WREG32(DB_WATERMARKS
, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1629 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1631 WREG32(PA_SC_MULTI_CHIP_CNTL
, 0);
1632 WREG32(VGT_NUM_INSTANCES
, 0);
1634 WREG32(SPI_CONFIG_CNTL
, GPR_WRITE_PRIORITY(0));
1635 WREG32(SPI_CONFIG_CNTL_1
, VTX_DONE_DELAY(0));
1637 tmp
= RREG32(SQ_MS_FIFO_SIZES
);
1638 if (((rdev
->family
) == CHIP_RV610
) ||
1639 ((rdev
->family
) == CHIP_RV620
) ||
1640 ((rdev
->family
) == CHIP_RS780
) ||
1641 ((rdev
->family
) == CHIP_RS880
)) {
1642 tmp
= (CACHE_FIFO_SIZE(0xa) |
1643 FETCH_FIFO_HIWATER(0xa) |
1644 DONE_FIFO_HIWATER(0xe0) |
1645 ALU_UPDATE_FIFO_HIWATER(0x8));
1646 } else if (((rdev
->family
) == CHIP_R600
) ||
1647 ((rdev
->family
) == CHIP_RV630
)) {
1648 tmp
&= ~DONE_FIFO_HIWATER(0xff);
1649 tmp
|= DONE_FIFO_HIWATER(0x4);
1651 WREG32(SQ_MS_FIFO_SIZES
, tmp
);
	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}
	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}
	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));
	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);
	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);
	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);
	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);
	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
	WREG32(VC_ENHANCE, 0);
}
/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
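
/*
 * Usage sketch (illustrative; SOME_PCIEP_REG and SOME_BIT stand in for a
 * real indirect register and field): the PCIE port registers live behind
 * an index/data pair, so a read-modify-write goes through both accessors:
 *
 *	u32 v = r600_pciep_rreg(rdev, SOME_PCIEP_REG);
 *	v |= SOME_BIT;
 *	r600_pciep_wreg(rdev, SOME_PCIEP_REG, v);
 *
 * The dummy reads of PCIE_PORT_INDEX above post the index write before
 * the data register is touched.
 */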
void r600_cp_stop(struct radeon_device *rdev)
{
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
}
int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		break;
	case CHIP_PALM:
		chip_name = "PALM";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO:
		chip_name = "SUMO";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO2:
		chip_name = "SUMO2";
		rlc_chip_name = "SUMO";
		break;
	default: BUG();
	}
	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}
	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}
	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}
	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}
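
/*
 * For example, an RV730 ends up requesting radeon/RV730_pfp.bin and
 * radeon/RV730_me.bin for the CP, but the shared radeon/R700_rlc.bin for
 * the RLC, since all r7xx parts use the same RLC image.
 */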
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
int r600_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(ring, 0x0);
		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(ring, 0x3);
		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
int r600_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	ring->rptr = RREG32(CP_RB_RPTR);

	r600_cp_start(rdev);
	ring->ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}
	return 0;
}
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
	u32 rb_bufsz;
	int r;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	ring->ring_size = ring_size;
	ring->align_mask = 16 - 1;

	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
}
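
/*
 * Worked example: the 1 MB GFX ring requested from r600_init() gives
 * rb_bufsz = drm_order(1048576 / 8) = 17, so the size is re-aligned to
 * (1 << 18) * 4 = 1 MB, and align_mask forces submissions to a multiple
 * of 16 dwords.
 */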
void r600_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}
/*
 * GPU scratch registers helpers function.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	if (rdev->wb.use_event) {
		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
					PACKET3_VC_ACTION_ENA |
					PACKET3_SH_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(ring, addr & 0xffffffff);
		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(ring, fence->seq);
		radeon_ring_write(ring, 0);
	} else {
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
					PACKET3_VC_ACTION_ENA |
					PACKET3_SH_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(ring, RB_INT_STAT);
	}
}
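
/*
 * The EVENT_WRITE_EOP packet emitted above carries five payload dwords:
 * the event type, the low 32 bits of the fence address, the high address
 * bits combined with DATA_SEL(1) (write a 32-bit value) and INT_SEL(2)
 * (raise an interrupt once the write lands), the fence sequence number,
 * and a reserved dword.
 */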
void r600_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	if (rdev->family < CHIP_CAYMAN)
		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;

	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	struct radeon_sa_bo *vb = NULL;
	int r;

	r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
	if (r) {
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
	r600_blit_done_copy(rdev, fence, vb, sem);
	return 0;
}
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}
int r600_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}
void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}
int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}
int r600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_cp_stop(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);

	return 0;
}
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * do nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback function
 * like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	return 0;
}
void r600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 next_rptr;

	if (ring->rptr_save_reg) {
		next_rptr = ring->wptr + 3 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, next_rptr);
	} else if (rdev->wb.enabled) {
		next_rptr = ring->wptr + 5 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
		radeon_ring_write(ring, next_rptr);
		radeon_ring_write(ring, 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, ring->idx, &ib, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}
/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
 * much the same as the CP ring buffer, but in reverse.  Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and host consumes.  As the host irq handler processes interrupts, it
 * increments the rptr.  When the rptr catches up with the wptr, all the
 * current interrupts have been processed.
 */
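
/*
 * Consumer-side sketch (illustrative): with a power-of-two ring, advancing
 * and wrapping the read pointer is a masked add, 16 bytes per vector:
 *
 *	while (rptr != wptr) {
 *		process_vector(&rdev->ih.ring[rptr / 4]);
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
 *	}
 *
 * r600_irq_process() below follows exactly this shape; process_vector()
 * stands in for its switch on src_id/src_data.
 */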
void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}
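
/*
 * Worked example: the 64 KB IH ring allocated from r600_init() gives
 * rb_bufsz = drm_order(65536 / 4) = 14, ring_size = (1 << 14) * 4 = 64 KB
 * and ptr_mask = 0xffff, so the byte-granular rptr/wptr wrap with a
 * simple AND.
 */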
int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;
	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}
void r600_rlc_stop(struct radeon_device *rdev)
{

	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		mdelay(15);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}
static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_CNTL, 0);

	if (rdev->family == CHIP_ARUBA) {
		WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
		WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
	}
	if (rdev->family <= CHIP_CAYMAN) {
		WREG32(RLC_HB_BASE, 0);
		WREG32(RLC_HB_RPTR, 0);
		WREG32(RLC_HB_WPTR, 0);
	}
	if (rdev->family <= CHIP_CAICOS) {
		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	}
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_ARUBA) {
		for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_CAYMAN) {
		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}
static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}
void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.rptr = 0;
}
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		} else {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
	}
}
int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* at this point everything should be setup correctly to enable master */
	pci_set_master(rdev->pdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi0, hdmi1;
	u32 d1grph = 0, d2grph = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
		} else {
			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 0\n");
		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
		} else {
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
	}

	return 0;
}
static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
		if (ASIC_IS_DCE32(rdev)) {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
		} else {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
		}
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		}
	} else {
		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
			if (ASIC_IS_DCE3(rdev)) {
				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
			} else {
				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
			}
		}
	}
}
void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happen start parsing interrupt
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catchup.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
/*        r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 vblank
 *      1         1  D1 vline
 *      5         0  D2 vblank
 *      5         1  D2 vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
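
/*
 * Worked decode example: a vector whose first two dwords are 0x00000001
 * and 0x00000000 has src_id 1 and src_data 0, i.e. a D1 vblank; one
 * starting with 0x000000b5 (181) is a CP EOP event, where src_data is
 * unused.
 */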
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[0]))
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[1]))
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX parts don't seem to take into account an HDP flush
 * performed through the ring buffer, which leads to corruption in
 * rendering, see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid this we directly perform the HDP flush by writing the
 * register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask, target_reg;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     R600_PCIE_LC_RENEGOTIATE_EN |
			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
	link_width_cntl |= mask;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

	/* some northbridges can renegotiate the link rather than requiring
	 * a complete re-config.
	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
	 */
	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
	else
		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						       RADEON_PCIE_LC_RECONFIG_NOW));

	if (rdev->family >= CHIP_RV770)
		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
	else
		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;

	/* wait for lane set to complete */
	link_width_cntl = RREG32(target_reg);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32(target_reg);

}
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;
	u32 mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}
/**
 * r600_get_gpu_clock - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
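
/*
 * Usage sketch (illustrative; run_workload() is a stand-in): two snapshots
 * bracket a workload to measure elapsed GPU clocks, and the mutex inside
 * r600_get_gpu_clock() keeps each LSB/MSB pair coherent:
 *
 *	uint64_t t0 = r600_get_gpu_clock(rdev);
 *	run_workload();
 *	uint64_t cycles = r600_get_gpu_clock(rdev) - t0;
 */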