/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/gcd.h>

#include <asm/div64.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>

/* driver-private headers: struct radeon_device, RREG32()/WREG32() register
 * accessors and the ATOM BIOS helpers used throughout this file.
 */
#include "radeon.h"
#include "atom.h"
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);

	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		WREG32(AVIVO_DC_LUT_30_COLOR,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}

	/* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
	WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
}
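/*
 * Illustrative note on the LUT packing above (assumes the DRM core's 16-bit
 * gamma_store entries): each AVIVO_DC_LUT_30_COLOR write packs one gamma
 * entry as a 30-bit 10:10:10 word. For *r == 0x8000, (*r & 0xffc0) << 14
 * places the top 10 bits of red in bits 29:20; green lands in bits 19:10
 * via the << 4 and blue in bits 9:0 via the >> 6.
 */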
static void dce4_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}
}
static void dce5_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);

	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
	       NI_GRPH_PRESCALE_BYPASS);
	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
	       NI_OVL_PRESCALE_BYPASS);
	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));

	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}

	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_OUTPUT_CSC_GRPH_MODE(radeon_crtc->output_csc) |
		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
	if (ASIC_IS_DCE8(rdev)) {
		/* XXX this only needs to be programmed once per crtc at startup,
		 * not sure where the best place for it is
		 */
		WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
		       CIK_CURSOR_ALPHA_BLND_ENA);
	}
}
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;
	uint32_t dac2_cntl;

	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
	if (radeon_crtc->crtc_id == 0)
		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
	else
		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
	WREG32(RADEON_DAC_CNTL2, dac2_cntl);

	WREG8(RADEON_PALETTE_INDEX, 0);
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_PALETTE_30_DATA,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}
}
void radeon_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;

	if (ASIC_IS_DCE5(rdev))
		dce5_crtc_load_lut(crtc);
	else if (ASIC_IS_DCE4(rdev))
		dce4_crtc_load_lut(crtc);
	else if (ASIC_IS_AVIVO(rdev))
		avivo_crtc_load_lut(crtc);
	else
		legacy_crtc_load_lut(crtc);
}
static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t size,
				 struct drm_modeset_acquire_ctx *ctx)
{
	radeon_crtc_load_lut(crtc);

	return 0;
}
static void radeon_crtc_destroy(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	drm_crtc_cleanup(crtc);
	destroy_workqueue(radeon_crtc->flip_queue);
	kfree(radeon_crtc);
}
/**
 * radeon_unpin_work_func - unpin old buffer object
 *
 * @__work: kernel work item
 *
 * Unpin the old frame buffer object outside of the interrupt handler
 */
static void radeon_unpin_work_func(struct work_struct *__work)
{
	struct radeon_flip_work *work =
		container_of(__work, struct radeon_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = radeon_bo_reserve(work->old_rbo, false);
	if (likely(r == 0)) {
		r = radeon_bo_unpin(work->old_rbo);
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		radeon_bo_unreserve(work->old_rbo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	drm_gem_object_put_unlocked(&work->old_rbo->tbo.base);
	kfree(work);
}
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	unsigned long flags;
	u32 update_pending;
	int vpos, hpos;

	/* can happen during initialization */
	if (radeon_crtc == NULL)
		return;

	/* Skip the pageflip completion check below (based on polling) on
	 * asics which reliably support hw pageflip completion irqs. pflip
	 * irqs are a reliable and race-free method of handling pageflip
	 * completion detection. A use_pflipirq module parameter < 2 allows
	 * to override this in case of asics with faulty pflip irqs.
	 * A module parameter of 0 would only use this polling based path,
	 * a parameter of 1 would use pflip irq only as a backup to this
	 * path, as in Linux 3.16.
	 */
	if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
		return;

	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	update_pending = radeon_page_flip_pending(rdev, crtc_id);

	/* Has the pageflip already completed in crtc, or is it certain
	 * to complete in this vblank? GET_DISTANCE_TO_VBLANKSTART provides
	 * distance to start of "fudged earlier" vblank in vpos, distance to
	 * start of real vblank in hpos. vpos >= 0 && hpos < 0 means we are in
	 * the last few scanlines before start of real vblank, where the vblank
	 * irq can fire, so we have sampled update_pending a bit too early and
	 * know the flip will complete at leading edge of the upcoming real
	 * vblank. On pre-AVIVO hardware, flips also complete inside the real
	 * vblank, not only at leading edge, so if update_pending for hpos >= 0
	 * == inside real vblank, the flip will complete almost immediately.
	 * Note that this method of completion handling is still not 100% race
	 * free, as we could execute before the radeon_flip_work_func managed
	 * to run and set the RADEON_FLIP_SUBMITTED status, thereby we no-op,
	 * but the flip still gets programmed into hw and completed during
	 * vblank, leading to a delayed emission of the flip completion event.
	 * This applies at least to pre-AVIVO hardware, where flips are always
	 * completing inside vblank, not only at leading edge of vblank.
	 */
	if (update_pending &&
	    (DRM_SCANOUTPOS_VALID &
	     radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
					GET_DISTANCE_TO_VBLANKSTART,
					&vpos, &hpos, NULL, NULL,
					&rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
	    ((vpos >= 0 && hpos < 0) || (hpos >= 0 && !ASIC_IS_AVIVO(rdev)))) {
		/* crtc didn't flip in this target vblank interval,
		 * but flip is pending in crtc. Based on the current
		 * scanout position we know that the current frame is
		 * (nearly) complete and the flip will (likely)
		 * complete before the start of the next frame.
		 */
		update_pending = 0;
	}
	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
	if (!update_pending)
		radeon_crtc_handle_flip(rdev, crtc_id);
}
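/*
 * Illustrative summary of the polling check in radeon_crtc_handle_vblank()
 * above (based on the GET_DISTANCE_TO_VBLANKSTART semantics documented with
 * radeon_get_crtc_scanoutpos() later in this file):
 *
 *   vpos >= 0 && hpos < 0  - between the fudged and the real vblank start;
 *                            the pending flip will complete at the upcoming
 *                            real vblank, so it is treated as done.
 *   hpos >= 0 on pre-AVIVO - already inside the real vblank; these asics
 *                            complete flips anywhere within vblank.
 *
 * In either case update_pending is cleared and the completion handler runs.
 */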
/**
 * radeon_crtc_handle_flip - page flip completed
 *
 * @rdev: radeon device pointer
 * @crtc_id: crtc number this event is for
 *
 * Called when we are sure that a page flip for this crtc is completed.
 */
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	struct radeon_flip_work *work;
	unsigned long flags;

	/* this can happen at init */
	if (radeon_crtc == NULL)
		return;

	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	work = radeon_crtc->flip_work;
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	/* Pageflip completed. Clean up. */
	radeon_crtc->flip_status = RADEON_FLIP_NONE;
	radeon_crtc->flip_work = NULL;

	/* wakeup userspace */
	if (work->event)
		drm_crtc_send_vblank_event(&radeon_crtc->base, work->event);

	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);

	drm_crtc_vblank_put(&radeon_crtc->base);
	radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
	queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
/**
 * radeon_flip_work_func - page flip framebuffer
 *
 * @__work: kernel work item
 *
 * Wait for the buffer object to become idle and do the actual page flip
 */
static void radeon_flip_work_func(struct work_struct *__work)
{
	struct radeon_flip_work *work =
		container_of(__work, struct radeon_flip_work, flip_work);
	struct radeon_device *rdev = work->rdev;
	struct drm_device *dev = rdev->ddev;
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &radeon_crtc->base;
	unsigned long flags;
	int r;
	int vpos, hpos;

	down_read(&rdev->exclusive_lock);
	if (work->fence) {
		struct radeon_fence *fence;

		fence = to_radeon_fence(work->fence);
		if (fence && fence->rdev == rdev) {
			r = radeon_fence_wait(fence, false);
			if (r == -EDEADLK) {
				up_read(&rdev->exclusive_lock);
				do {
					r = radeon_gpu_reset(rdev);
				} while (r == -EAGAIN);
				down_read(&rdev->exclusive_lock);
			}
		} else
			r = dma_fence_wait(work->fence, false);

		if (r)
			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);

		/* We continue with the page flip even if we failed to wait on
		 * the fence, otherwise the DRM core and userspace will be
		 * confused about which BO the CRTC is scanning out
		 */

		dma_fence_put(work->fence);
		work->fence = NULL;
	}

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip. Always wait on pre DCE4 to avoid races with
	 * flip completion handling from vblank irq, as these old asics don't
	 * have reliable pageflip completion interrupts.
	 */
	while (radeon_crtc->enabled &&
	       (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0,
					   &vpos, &hpos, NULL, NULL,
					   &crtc->hwmode)
		& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	       (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	       (!ASIC_IS_AVIVO(rdev) ||
		((int) (work->target_vblank -
			dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)))
		usleep_range(1000, 2000);

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* set the proper interrupt */
	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);

	/* do the flip (mmio) */
	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async);

	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	up_read(&rdev->exclusive_lock);
}
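/*
 * Note on the wait loop in radeon_flip_work_func() above (a reading of the
 * loop condition, not text from the original source):
 * radeon_get_crtc_scanoutpos() reports both DRM_SCANOUTPOS_VALID and
 * DRM_SCANOUTPOS_IN_VBLANK while the CRTC is still inside a vertical blank
 * preceding the target one, so the loop sleeps 1-2 ms at a time until
 * scanout has left that vblank. On AVIVO+ it additionally stops once the
 * hardware vblank counter has reached work->target_vblank, while pre-AVIVO
 * parts always wait the vblank out.
 */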
static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
					struct drm_framebuffer *fb,
					struct drm_pending_vblank_event *event,
					uint32_t page_flip_flags,
					uint32_t target,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_gem_object *obj;
	struct radeon_flip_work *work;
	struct radeon_bo *new_rbo;
	uint32_t tiling_flags, pitch_pixels;
	uint64_t base;
	unsigned long flags;
	int r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_WORK(&work->flip_work, radeon_flip_work_func);
	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);

	work->rdev = rdev;
	work->crtc_id = radeon_crtc->crtc_id;
	work->event = event;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	drm_gem_object_get(obj);
	work->old_rbo = gem_to_radeon_bo(obj);

	obj = fb->obj[0];
	new_rbo = gem_to_radeon_bo(obj);

	/* pin the new buffer */
	DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
			 work->old_rbo, new_rbo);

	r = radeon_bo_reserve(new_rbo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
		goto cleanup;
	}
	/* Only 27 bit offset for legacy CRTC */
	r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
	if (unlikely(r != 0)) {
		radeon_bo_unreserve(new_rbo);
		DRM_ERROR("failed to pin new rbo buffer before flip\n");
		goto cleanup;
	}
	work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
	radeon_bo_unreserve(new_rbo);

	if (!ASIC_IS_AVIVO(rdev)) {
		/* crtc offset is from display base addr not FB location */
		base -= radeon_crtc->legacy_display_base_addr;
		pitch_pixels = fb->pitches[0] / fb->format->cpp[0];

		if (tiling_flags & RADEON_TILING_MACRO) {
			if (ASIC_IS_R300(rdev)) {
				/* R300+ macro-tile base alignment (elided in this excerpt) */
			} else {
				int byteshift = fb->format->cpp[0] * 8 >> 4;
				int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
			}
		} else {
			int offset = crtc->y * pitch_pixels + crtc->x;
			switch (fb->format->cpp[0] * 8) {
			/* per-bpp scaling of the linear offset (cases elided in this excerpt) */
			default:
				break;
			}
			base += offset;
		}
	}
	work->base = base;
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		dev->driver->get_vblank_counter(dev, work->crtc_id);

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}
	radeon_crtc->flip_status = RADEON_FLIP_PENDING;
	radeon_crtc->flip_work = work;

	/* update crtc fb */
	crtc->primary->fb = fb;

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	queue_work(radeon_crtc->flip_queue, &work->flip_work);
	return 0;

pflip_cleanup:
	if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
		DRM_ERROR("failed to reserve new rbo in error path\n");
		goto cleanup;
	}
	if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
		DRM_ERROR("failed to unpin new rbo in error path\n");
	}
	radeon_bo_unreserve(new_rbo);

cleanup:
	drm_gem_object_put_unlocked(&work->old_rbo->tbo.base);
	dma_fence_put(work->fence);
	kfree(work);
	return r;
}
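/*
 * Illustrative note on the target_vblank computation above: userspace passes
 * an absolute target expressed in the DRM core's drm_crtc_vblank_count()
 * time base, while the wait loop in radeon_flip_work_func() compares against
 * the driver's hardware counter. Rebasing as
 *
 *     target - drm_crtc_vblank_count(crtc) + get_vblank_counter(...)
 *
 * converts the target into the hardware counter's base; keeping everything
 * in 32-bit unsigned arithmetic (with a signed comparison of the difference
 * in the wait loop) makes the test robust against counter wraparound.
 */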
static int
radeon_crtc_set_config(struct drm_mode_set *set,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	rdev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	   take the current one */
	if (active && !rdev->have_disp_power_ref) {
		rdev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	   we got before */
	if (!active && rdev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		rdev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
static const struct drm_crtc_funcs radeon_crtc_funcs = {
	.cursor_set2 = radeon_crtc_cursor_set2,
	.cursor_move = radeon_crtc_cursor_move,
	.gamma_set = radeon_crtc_gamma_set,
	.set_config = radeon_crtc_set_config,
	.destroy = radeon_crtc_destroy,
	.page_flip_target = radeon_crtc_page_flip_target,
};
static void radeon_crtc_init(struct drm_device *dev, int index)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc;
	int i;

	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (radeon_crtc == NULL)
		return;

	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
	radeon_crtc->crtc_id = index;
	radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
	rdev->mode_info.crtcs[index] = radeon_crtc;

	if (rdev->family >= CHIP_BONAIRE) {
		radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
		radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
	} else {
		radeon_crtc->max_cursor_width = CURSOR_WIDTH;
		radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
	}
	dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
	dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;

	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
	radeon_crtc->mode_set.num_connectors = 0;

	for (i = 0; i < 256; i++) {
		radeon_crtc->lut_r[i] = i << 2;
		radeon_crtc->lut_g[i] = i << 2;
		radeon_crtc->lut_b[i] = i << 2;
	}

	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
		radeon_atombios_init_crtc(dev, radeon_crtc);
	else
		radeon_legacy_init_crtc(dev, radeon_crtc);
}
static const char *encoder_names[38] = {
	/* earlier encoder names elided in this excerpt */
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	/* ... */
	"INTERNAL_KLDSCP_LVTMA",
	/* remaining encoder names elided in this excerpt */
};

static const char *hpd_names[6] = {
	/* hot-plug detect pin names elided in this excerpt */
};
static void radeon_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("Radeon Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		radeon_connector = to_radeon_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
		if (radeon_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 radeon_connector->ddc_bus->rec.mask_clk_reg,
				 radeon_connector->ddc_bus->rec.mask_data_reg,
				 radeon_connector->ddc_bus->rec.a_clk_reg,
				 radeon_connector->ddc_bus->rec.a_data_reg,
				 radeon_connector->ddc_bus->rec.en_clk_reg,
				 radeon_connector->ddc_bus->rec.en_data_reg,
				 radeon_connector->ddc_bus->rec.y_clk_reg,
				 radeon_connector->ddc_bus->rec.y_data_reg);
			if (radeon_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 radeon_connector->router.ddc_mux_control_pin,
					 radeon_connector->router.ddc_mux_state);
			if (radeon_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 radeon_connector->router.cd_mux_control_pin,
					 radeon_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			radeon_encoder = to_radeon_encoder(encoder);
			devices = radeon_encoder->devices & radeon_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
			}
		}
		i++;
	}
}
static bool radeon_setup_enc_conn(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	bool ret = false;

	if (rdev->bios) {
		if (rdev->is_atom_bios) {
			ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
			if (!ret)
				ret = radeon_get_atom_connector_info_from_object_table(dev);
		} else {
			ret = radeon_get_legacy_connector_info_from_bios(dev);
			if (!ret)
				ret = radeon_get_legacy_connector_info_from_table(dev);
		}
	} else {
		if (!ASIC_IS_AVIVO(rdev))
			ret = radeon_get_legacy_connector_info_from_table(dev);
	}
	if (ret) {
		radeon_setup_encoder_clones(dev);
		radeon_print_display_setup(dev);
	}

	return ret;
}
/**
 * avivo_reduce_ratio - fractional number reduction
 *
 * @nom: numerator
 * @den: denominator
 * @nom_min: minimum value for numerator
 * @den_min: minimum value for denominator
 *
 * Find the greatest common divisor and apply it on both numerator and
 * denominator, but make sure numerator and denominator are at least as
 * large as their minimum values.
 */
static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
			       unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure the numerator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}
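/*
 * Worked example (illustrative values, units of 10 kHz as used elsewhere in
 * this driver): for a 148.5 MHz pixel clock from a 27 MHz reference,
 * avivo_reduce_ratio() is called with *nom = 14850 and *den = 2700.
 * gcd(14850, 2700) = 1350, so the ratio reduces to 11/2; with nom_min = 4
 * and den_min = 1 both values already satisfy their minimums, so no further
 * scaling is applied.
 */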
/**
 * avivo_get_fb_ref_div - feedback and ref divider calculation
 *
 * @nom: numerator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate feedback and reference divider for a given post divider. Makes
 * sure we stay within the limits.
 */
static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				 unsigned fb_div_max, unsigned ref_div_max,
				 unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);

	/* get matching reference and feedback divider */
	*ref_div = min(max(den/post_div, 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = (*ref_div * fb_div_max)/(*fb_div);
		*fb_div = fb_div_max;
	}
}
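/*
 * Worked example (illustrative, continuing the 11/2 ratio above): with
 * post_div = 4 and assumed limits fb_div_max = 1023 and ref_div_max = 30,
 * avivo_get_fb_ref_div() first caps ref_div_max to min(100 / 4, 30) = 25,
 * then picks *ref_div = min(max(2 / 4, 1), 25) = 1 and
 * *fb_div = DIV_ROUND_CLOSEST(11 * 1 * 4, 2) = 22, both within the limits.
 */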
/**
 * radeon_compute_pll_avivo - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void radeon_compute_pll_avivo(struct radeon_pll *pll,
			      u32 freq,
			      u32 *dot_clock_p,
			      u32 *fb_div_p,
			      u32 *frac_fb_div_p,
			      u32 *ref_div_p,
			      u32 *post_div_p)
{
	unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & RADEON_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & RADEON_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
		/* fix for problems on RS880 */
		ref_div_max = min(pll->max_ref_div, 7u);
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & RADEON_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & RADEON_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}

	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;

		avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
				     ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
			     &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      *ref_div_p, *post_div_p);
}
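/*
 * Worked example (illustrative) of the relation in the comment above,
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div):
 * with ref_freq = 2700 (27 MHz in 10 kHz units), feedback_div = 22,
 * ref_div = 1 and post_div = 4, the PLL output is
 * 2700 * 22 / (1 * 4) = 14850, i.e. the 148.5 MHz pixel clock from the
 * examples above.
 */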
/*
 * Helper for 64-bit by 32-bit division. The original body is elided in this
 * excerpt; the minimal reconstruction below simply divides n by d with
 * do_div() from <asm/div64.h> and returns the 32-bit quotient.
 */
static inline uint32_t radeon_div(uint64_t n, uint32_t d)
{
	do_div(n, d);
	return n;
}
void radeon_compute_pll_legacy(struct radeon_pll *pll,
			       uint64_t freq,
			       uint32_t *dot_clock_p,
			       uint32_t *fb_div_p,
			       uint32_t *frac_fb_div_p,
			       uint32_t *ref_div_p,
			       uint32_t *post_div_p)
{
	uint32_t min_ref_div = pll->min_ref_div;
	uint32_t max_ref_div = pll->max_ref_div;
	uint32_t min_post_div = pll->min_post_div;
	uint32_t max_post_div = pll->max_post_div;
	uint32_t min_fractional_feed_div = 0;
	uint32_t max_fractional_feed_div = 0;
	uint32_t best_vco = pll->best_vco;
	uint32_t best_post_div = 1;
	uint32_t best_ref_div = 1;
	uint32_t best_feedback_div = 1;
	uint32_t best_frac_feedback_div = 0;
	uint32_t best_freq = -1;
	uint32_t best_error = 0xffffffff;
	uint32_t best_vco_diff = 1;
	uint32_t post_div;
	u32 pll_out_min, pll_out_max;

	DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
	freq = freq * 1000;

	if (pll->flags & RADEON_PLL_IS_LCD) {
		pll_out_min = pll->lcd_pll_out_min;
		pll_out_max = pll->lcd_pll_out_max;
	} else {
		pll_out_min = pll->pll_out_min;
		pll_out_max = pll->pll_out_max;
	}

	if (pll_out_min > 64800)
		pll_out_min = 64800;

	if (pll->flags & RADEON_PLL_USE_REF_DIV)
		min_ref_div = max_ref_div = pll->reference_div;
	else {
		while (min_ref_div < max_ref_div-1) {
			uint32_t mid = (min_ref_div + max_ref_div) / 2;
			uint32_t pll_in = pll->reference_freq / mid;
			if (pll_in < pll->pll_in_min)
				max_ref_div = mid;
			else if (pll_in > pll->pll_in_max)
				min_ref_div = mid;
			else
				break;
		}
	}

	if (pll->flags & RADEON_PLL_USE_POST_DIV)
		min_post_div = max_post_div = pll->post_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		min_fractional_feed_div = pll->min_frac_feedback_div;
		max_fractional_feed_div = pll->max_frac_feedback_div;
	}

	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
		uint32_t ref_div;

		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
			continue;

		/* legacy radeons only have a few post_divs */
		if (pll->flags & RADEON_PLL_LEGACY) {
			if ((post_div == 5) /* || ... (other unsupported post dividers, elided in this excerpt) */)
				continue;
		}

		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
			uint32_t feedback_div, current_freq = 0, error, vco_diff;
			uint32_t pll_in = pll->reference_freq / ref_div;
			uint32_t min_feed_div = pll->min_feedback_div;
			uint32_t max_feed_div = pll->max_feedback_div + 1;

			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
				continue;

			while (min_feed_div < max_feed_div) {
				uint32_t vco;
				uint32_t min_frac_feed_div = min_fractional_feed_div;
				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
				uint32_t frac_feedback_div;
				uint64_t tmp;

				feedback_div = (min_feed_div + max_feed_div) / 2;

				tmp = (uint64_t)pll->reference_freq * feedback_div;
				vco = radeon_div(tmp, ref_div);

				if (vco < pll_out_min) {
					min_feed_div = feedback_div + 1;
					continue;
				} else if (vco > pll_out_max) {
					max_feed_div = feedback_div;
					continue;
				}

				while (min_frac_feed_div < max_frac_feed_div) {
					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
					current_freq = radeon_div(tmp, ref_div * post_div);

					if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
						if (freq < current_freq)
							error = 0xffffffff;
						else
							error = freq - current_freq;
					} else
						error = abs(current_freq - freq);
					vco_diff = abs(vco - best_vco);

					if ((best_vco == 0 && error < best_error) ||
					    (best_vco != 0 &&
					     ((best_error > 100 && error < best_error - 100) ||
					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
						best_post_div = post_div;
						best_ref_div = ref_div;
						best_feedback_div = feedback_div;
						best_frac_feedback_div = frac_feedback_div;
						best_freq = current_freq;
						best_error = error;
						best_vco_diff = vco_diff;
					} else if (current_freq == freq) {
						if (best_freq == -1) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						}
					}
					if (current_freq < freq)
						min_frac_feed_div = frac_feedback_div + 1;
					else
						max_frac_feed_div = frac_feedback_div;
				}
				if (current_freq < freq)
					min_feed_div = feedback_div + 1;
				else
					max_feed_div = feedback_div;
			}
		}
	}

	*dot_clock_p = best_freq / 10000;
	*fb_div_p = best_feedback_div;
	*frac_fb_div_p = best_frac_feedback_div;
	*ref_div_p = best_ref_div;
	*post_div_p = best_post_div;
	DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
		      best_ref_div, best_post_div);
}
static const struct drm_framebuffer_funcs radeon_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};
int
radeon_framebuffer_init(struct drm_device *dev,
			struct drm_framebuffer *fb,
			const struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *obj)
{
	int ret;

	fb->obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
	ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
	if (ret) {
		fb->obj[0] = NULL;
		return ret;
	}
	return 0;
}
static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct drm_framebuffer *fb;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
	if (obj->import_attach) {
		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
		return ERR_PTR(-EINVAL);
	}

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (fb == NULL) {
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
	if (ret) {
		kfree(fb);
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(ret);
	}

	return fb;
}
static const struct drm_mode_config_funcs radeon_mode_funcs = {
	.fb_create = radeon_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};
static const struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{
	/* TMDS PLL source entries elided in this excerpt */
};

static const struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{	{ TV_STD_NTSC, "ntsc" },
	{ TV_STD_PAL, "pal" },
	{ TV_STD_PAL_M, "pal-m" },
	{ TV_STD_PAL_60, "pal-60" },
	{ TV_STD_NTSC_J, "ntsc-j" },
	{ TV_STD_SCART_PAL, "scart-pal" },
	{ TV_STD_PAL_CN, "pal-cn" },
	{ TV_STD_SECAM, "secam" },
};

static const struct drm_prop_enum_list radeon_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list radeon_audio_enum_list[] =
{	{ RADEON_AUDIO_DISABLE, "off" },
	{ RADEON_AUDIO_ENABLE, "on" },
	{ RADEON_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list radeon_dither_enum_list[] =
{	{ RADEON_FMT_DITHER_DISABLE, "off" },
	{ RADEON_FMT_DITHER_ENABLE, "on" },
};

static const struct drm_prop_enum_list radeon_output_csc_enum_list[] =
{	{ RADEON_OUTPUT_CSC_BYPASS, "bypass" },
	{ RADEON_OUTPUT_CSC_TVRGB, "tvrgb" },
	{ RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" },
	{ RADEON_OUTPUT_CSC_YCBCR709, "ycbcr709" },
};
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
	int sz;

	if (rdev->is_atom_bios) {
		rdev->mode_info.coherent_mode_property =
			drm_property_create_range(rdev->ddev, 0, "coherent", 0, 1);
		if (!rdev->mode_info.coherent_mode_property)
			return -ENOMEM;
	}

	if (!ASIC_IS_AVIVO(rdev)) {
		sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
		rdev->mode_info.tmds_pll_property =
			drm_property_create_enum(rdev->ddev, 0,
						 "tmds_pll",
						 radeon_tmds_pll_enum_list, sz);
	}

	rdev->mode_info.load_detect_property =
		drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
	if (!rdev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(rdev->ddev);

	sz = ARRAY_SIZE(radeon_tv_std_enum_list);
	rdev->mode_info.tv_std_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "tv standard",
					 radeon_tv_std_enum_list, sz);

	sz = ARRAY_SIZE(radeon_underscan_enum_list);
	rdev->mode_info.underscan_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "underscan",
					 radeon_underscan_enum_list, sz);

	rdev->mode_info.underscan_hborder_property =
		drm_property_create_range(rdev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!rdev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	rdev->mode_info.underscan_vborder_property =
		drm_property_create_range(rdev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!rdev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(radeon_audio_enum_list);
	rdev->mode_info.audio_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "audio",
					 radeon_audio_enum_list, sz);

	sz = ARRAY_SIZE(radeon_dither_enum_list);
	rdev->mode_info.dither_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "dither",
					 radeon_dither_enum_list, sz);

	sz = ARRAY_SIZE(radeon_output_csc_enum_list);
	rdev->mode_info.output_csc_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "output_csc",
					 radeon_output_csc_enum_list, sz);

	return 0;
}
void radeon_update_display_priority(struct radeon_device *rdev)
{
	/* adjustment options for the display watermarks */
	if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
		/* set display priority to high for r3xx, rv515 chips
		 * this avoids flickering due to underflow to the
		 * display controllers during heavy acceleration.
		 * Don't force high on rs4xx igp chips as it seems to
		 * affect the sound card.  See kernel bug 15982.
		 */
		if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
		    !(rdev->flags & RADEON_IS_IGP))
			rdev->disp_priority = 2;
		else
			rdev->disp_priority = 0;
	} else
		rdev->disp_priority = radeon_disp_priority;
}
/*
 * Allocate hdmi structs and determine register offsets
 */
static void radeon_afmt_init(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
		rdev->mode_info.afmt[i] = NULL;

	if (ASIC_IS_NODCE(rdev)) {
		/* nothing to do */
	} else if (ASIC_IS_DCE4(rdev)) {
		static uint32_t eg_offsets[] = {
			EVERGREEN_CRTC0_REGISTER_OFFSET,
			EVERGREEN_CRTC1_REGISTER_OFFSET,
			EVERGREEN_CRTC2_REGISTER_OFFSET,
			EVERGREEN_CRTC3_REGISTER_OFFSET,
			EVERGREEN_CRTC4_REGISTER_OFFSET,
			EVERGREEN_CRTC5_REGISTER_OFFSET,
		};
		int num_afmt;

		/* DCE8 has 7 audio blocks tied to DIG encoders */
		/* DCE6 has 6 audio blocks tied to DIG encoders */
		/* DCE4/5 has 6 audio blocks tied to DIG encoders */
		/* DCE4.1 has 2 audio blocks tied to DIG encoders */
		if (ASIC_IS_DCE8(rdev))
			num_afmt = 7;
		else if (ASIC_IS_DCE6(rdev))
			num_afmt = 6;
		else if (ASIC_IS_DCE5(rdev))
			num_afmt = 6;
		else if (ASIC_IS_DCE41(rdev))
			num_afmt = 2;
		else /* DCE4 */
			num_afmt = 6;

		BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
		for (i = 0; i < num_afmt; i++) {
			rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
			if (rdev->mode_info.afmt[i]) {
				rdev->mode_info.afmt[i]->offset = eg_offsets[i];
				rdev->mode_info.afmt[i]->id = i;
			}
		}
	} else if (ASIC_IS_DCE3(rdev)) {
		/* DCE3.x has 2 audio blocks tied to DIG encoders */
		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[0]) {
			rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
			rdev->mode_info.afmt[0]->id = 0;
		}
		rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[1]) {
			rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
			rdev->mode_info.afmt[1]->id = 1;
		}
	} else if (ASIC_IS_DCE2(rdev)) {
		/* DCE2 has at least 1 routable audio block */
		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[0]) {
			rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
			rdev->mode_info.afmt[0]->id = 0;
		}
		/* r6xx has 2 routable audio blocks */
		if (rdev->family >= CHIP_R600) {
			rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
			if (rdev->mode_info.afmt[1]) {
				rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
				rdev->mode_info.afmt[1]->id = 1;
			}
		}
	}
}
static void radeon_afmt_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
		kfree(rdev->mode_info.afmt[i]);
		rdev->mode_info.afmt[i] = NULL;
	}
}
int radeon_modeset_init(struct radeon_device *rdev)
{
	int i;
	int ret;

	drm_mode_config_init(rdev->ddev);
	rdev->mode_info.mode_config_initialized = true;

	rdev->ddev->mode_config.funcs = &radeon_mode_funcs;

	if (radeon_use_pflipirq == 2 && rdev->family >= CHIP_R600)
		rdev->ddev->mode_config.async_page_flip = true;

	if (ASIC_IS_DCE5(rdev)) {
		rdev->ddev->mode_config.max_width = 16384;
		rdev->ddev->mode_config.max_height = 16384;
	} else if (ASIC_IS_AVIVO(rdev)) {
		rdev->ddev->mode_config.max_width = 8192;
		rdev->ddev->mode_config.max_height = 8192;
	} else {
		rdev->ddev->mode_config.max_width = 4096;
		rdev->ddev->mode_config.max_height = 4096;
	}

	rdev->ddev->mode_config.preferred_depth = 24;
	rdev->ddev->mode_config.prefer_shadow = 1;

	rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;

	ret = radeon_modeset_create_props(rdev);
	if (ret)
		return ret;

	/* init i2c buses */
	radeon_i2c_init(rdev);

	/* check combios for a valid hardcoded EDID - Sun servers */
	if (!rdev->is_atom_bios) {
		/* check for hardcoded EDID in BIOS */
		radeon_combios_check_hardcoded_edid(rdev);
	}

	/* allocate crtcs */
	for (i = 0; i < rdev->num_crtc; i++) {
		radeon_crtc_init(rdev->ddev, i);
	}

	/* okay we should have all the bios connectors */
	ret = radeon_setup_enc_conn(rdev->ddev);
	if (!ret)
		return ret;

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
	}

	/* initialize hpd */
	radeon_hpd_init(rdev);

	/* setup afmt */
	radeon_afmt_init(rdev);

	radeon_fbdev_init(rdev);
	drm_kms_helper_poll_init(rdev->ddev);

	/* do pm late init */
	ret = radeon_pm_late_init(rdev);

	return 0;
}
void radeon_modeset_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.mode_config_initialized) {
		drm_kms_helper_poll_fini(rdev->ddev);
		radeon_hpd_fini(rdev);
		drm_helper_force_disable_all(rdev->ddev);
		radeon_fbdev_fini(rdev);
		radeon_afmt_fini(rdev);
		drm_mode_config_cleanup(rdev->ddev);
		rdev->mode_info.mode_config_initialized = false;
	}

	kfree(rdev->mode_info.bios_hardcoded_edid);

	/* free i2c buses */
	radeon_i2c_fini(rdev);
}
static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_encoder *radeon_encoder;
	struct drm_connector *connector;
	bool first = true;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	radeon_crtc->h_border = 0;
	radeon_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		radeon_encoder = to_radeon_encoder(encoder);
		connector = radeon_get_connector_for_encoder(encoder);

		if (first) {
			/* set scaling */
			if (radeon_encoder->rmx_type == RMX_OFF)
				radeon_crtc->rmx_type = RMX_OFF;
			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
			else
				radeon_crtc->rmx_type = RMX_OFF;
			/* copy native mode */
			memcpy(&radeon_crtc->native_mode,
			       &radeon_encoder->native_mode,
			       sizeof(struct drm_display_mode));
			src_v = crtc->mode.vdisplay;
			dst_v = radeon_crtc->native_mode.vdisplay;
			src_h = crtc->mode.hdisplay;
			dst_h = radeon_crtc->native_mode.hdisplay;

			/* fix up for overscan on hdmi */
			if (ASIC_IS_AVIVO(rdev) &&
			    (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
			    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
			     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
			      drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
			      is_hdtv_mode(mode)))) {
				if (radeon_encoder->underscan_hborder != 0)
					radeon_crtc->h_border = radeon_encoder->underscan_hborder;
				else
					radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
				if (radeon_encoder->underscan_vborder != 0)
					radeon_crtc->v_border = radeon_encoder->underscan_vborder;
				else
					radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
				radeon_crtc->rmx_type = RMX_FULL;
				src_v = crtc->mode.vdisplay;
				dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
				src_h = crtc->mode.hdisplay;
				dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
			}
			first = false;
		} else {
			if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
				/* WARNING: Right now this can't happen but
				 * in the future we need to check that scaling
				 * is consistent across different encoders
				 * (i.e. all encoders can work with the same
				 *  scaling).
				 */
				DRM_ERROR("Scaling not consistent across encoder.\n");
				return false;
			}
		}
	}
	if (radeon_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		radeon_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		radeon_crtc->hsc.full = dfixed_div(a, b);
	} else {
		radeon_crtc->vsc.full = dfixed_const(1);
		radeon_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}
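/*
 * Illustrative example of the ratios computed above: scanning out a
 * 1280x720 mode on an encoder whose native mode is 1920x1200 gives
 * vsc = 720 / 1200 = 0.6 and hsc = 1280 / 1920 = 0.667, i.e. source size
 * divided by destination size, stored in the 20.12 fixed-point format of
 * fixed20_12.
 */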
/**
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when query happened.
 *
 * \param dev Device to query.
 * \param crtc Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 *              For driver internal use only also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_INVBL = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 */
int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
			       unsigned int flags, int *vpos, int *hpos,
			       ktime_t *stime, ktime_t *etime,
			       const struct drm_display_mode *mode)
{
	u32 stat_crtc = 0, vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct radeon_device *rdev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (ASIC_IS_DCE4(rdev)) {
		if (pipe == 0) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC0_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC0_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 1) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC1_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC1_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 2) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC2_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC2_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 3) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC3_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC3_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 4) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC4_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC4_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 5) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC5_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC5_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else if (ASIC_IS_AVIVO(rdev)) {
		if (pipe == 0) {
			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 1) {
			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else {
		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
		if (pipe == 0) {
			/* Assume vbl_end == 0, get vbl_start from
			 * the upper 16 bits.
			 */
			vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			/* Only retrieve vpos from upper 16 bits, set hpos == 0. */
			position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 1) {
			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;