/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

#include <asm/div64.h>

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_edid.h>

#include <linux/gcd.h>

static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);

	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
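	/* crtc->gamma_store holds 256 16-bit entries per channel (r, g, b
	 * stored back to back); keep the top 10 bits of each and pack them
	 * into one 30-bit LUT word: red in [29:20], green in [19:10],
	 * blue in [9:0]. */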
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		WREG32(AVIVO_DC_LUT_30_COLOR,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}

	/* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
	WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
}

static void dce4_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}
}

static void dce5_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
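
	/* On DCE5+ the graphics/overlay pipes have input CSC, prescale and
	 * input gamma blocks in front of the legacy LUT; put them in bypass
	 * (and route input gamma through the LUT) so the LUT programming
	 * below behaves the same way as on older parts. */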
	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
	       NI_GRPH_PRESCALE_BYPASS);
	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
	       NI_OVL_PRESCALE_BYPASS);
	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));

	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}

	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_OUTPUT_CSC_GRPH_MODE(radeon_crtc->output_csc) |
		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
	if (ASIC_IS_DCE8(rdev)) {
		/* XXX this only needs to be programmed once per crtc at startup,
		 * not sure where the best place for it is
		 */
		WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
		       CIK_CURSOR_ALPHA_BLND_ENA);
	}
}

static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u16 *r, *g, *b;
	int i;
	uint32_t dac2_cntl;

	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
	if (radeon_crtc->crtc_id == 0)
		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
	else
		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
	WREG32(RADEON_DAC_CNTL2, dac2_cntl);

	WREG8(RADEON_PALETTE_INDEX, 0);
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_PALETTE_30_DATA,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}
}

void radeon_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;

	if (ASIC_IS_DCE5(rdev))
		dce5_crtc_load_lut(crtc);
	else if (ASIC_IS_DCE4(rdev))
		dce4_crtc_load_lut(crtc);
	else if (ASIC_IS_AVIVO(rdev))
		avivo_crtc_load_lut(crtc);
	else
		legacy_crtc_load_lut(crtc);
}

static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t size,
				 struct drm_modeset_acquire_ctx *ctx)
{
	radeon_crtc_load_lut(crtc);

	return 0;
}

static void radeon_crtc_destroy(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	drm_crtc_cleanup(crtc);
	destroy_workqueue(radeon_crtc->flip_queue);
	kfree(radeon_crtc);
}

/**
 * radeon_unpin_work_func - unpin old buffer object
 *
 * @__work - kernel work item
 *
 * Unpin the old frame buffer object outside of the interrupt handler
 */
static void radeon_unpin_work_func(struct work_struct *__work)
{
	struct radeon_flip_work *work =
		container_of(__work, struct radeon_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = radeon_bo_reserve(work->old_rbo, false);
	if (likely(r == 0)) {
		r = radeon_bo_unpin(work->old_rbo);
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		radeon_bo_unreserve(work->old_rbo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
	kfree(work);
}

void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	unsigned long flags;
	u32 update_pending;
	int vpos, hpos;

	/* can happen during initialization */
	if (radeon_crtc == NULL)
		return;

	/* Skip the pageflip completion check below (based on polling) on
	 * asics which reliably support hw pageflip completion irqs. pflip
	 * irqs are a reliable and race-free method of handling pageflip
	 * completion detection. A use_pflipirq module parameter < 2 allows
	 * to override this in case of asics with faulty pflip irqs.
	 * A module parameter of 0 would only use this polling based path,
	 * a parameter of 1 would use pflip irq only as a backup to this
	 * path, as in Linux 3.16.
	 */
	if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
		return;

	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	update_pending = radeon_page_flip_pending(rdev, crtc_id);

	/* Has the pageflip already completed in crtc, or is it certain
	 * to complete in this vblank? GET_DISTANCE_TO_VBLANKSTART provides
	 * distance to start of "fudged earlier" vblank in vpos, distance to
	 * start of real vblank in hpos. vpos >= 0 && hpos < 0 means we are in
	 * the last few scanlines before start of real vblank, where the vblank
	 * irq can fire, so we have sampled update_pending a bit too early and
	 * know the flip will complete at leading edge of the upcoming real
	 * vblank. On pre-AVIVO hardware, flips also complete inside the real
	 * vblank, not only at leading edge, so if update_pending for hpos >= 0
	 * == inside real vblank, the flip will complete almost immediately.
	 * Note that this method of completion handling is still not 100% race
	 * free, as we could execute before the radeon_flip_work_func managed
	 * to run and set the RADEON_FLIP_SUBMITTED status, thereby we no-op,
	 * but the flip still gets programmed into hw and completed during
	 * vblank, leading to a delayed emission of the flip completion event.
	 * This applies at least to pre-AVIVO hardware, where flips are always
	 * completing inside vblank, not only at leading edge of vblank.
	 */
	if (update_pending &&
	    (DRM_SCANOUTPOS_VALID &
	     radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
					GET_DISTANCE_TO_VBLANKSTART,
					&vpos, &hpos, NULL, NULL,
					&rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
	    ((vpos >= 0 && hpos < 0) || (hpos >= 0 && !ASIC_IS_AVIVO(rdev)))) {
		/* crtc didn't flip in this target vblank interval,
		 * but flip is pending in crtc. Based on the current
		 * scanout position we know that the current frame is
		 * (nearly) complete and the flip will (likely)
		 * complete before the start of the next frame.
		 */
		update_pending = 0;
	}
	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
	if (!update_pending)
		radeon_crtc_handle_flip(rdev, crtc_id);
}

/**
 * radeon_crtc_handle_flip - page flip completed
 *
 * @rdev: radeon device pointer
 * @crtc_id: crtc number this event is for
 *
 * Called when we are sure that a page flip for this crtc is completed.
 */
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	struct radeon_flip_work *work;
	unsigned long flags;

	/* this can happen at init */
	if (radeon_crtc == NULL)
		return;

	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	work = radeon_crtc->flip_work;
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	/* Pageflip completed. Clean up. */
	radeon_crtc->flip_status = RADEON_FLIP_NONE;
	radeon_crtc->flip_work = NULL;

	/* wakeup userspace */
	if (work->event)
		drm_crtc_send_vblank_event(&radeon_crtc->base, work->event);

	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
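
	/* release the vblank and pageflip-irq references held for this flip,
	 * then hand the old buffer to the unpin worker */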
	drm_crtc_vblank_put(&radeon_crtc->base);
	radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
	queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}

/**
 * radeon_flip_work_func - page flip framebuffer
 *
 * @work - kernel work item
 *
 * Wait for the buffer object to become idle and do the actual page flip
 */
static void radeon_flip_work_func(struct work_struct *__work)
{
	struct radeon_flip_work *work =
		container_of(__work, struct radeon_flip_work, flip_work);
	struct radeon_device *rdev = work->rdev;
	struct drm_device *dev = rdev->ddev;
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &radeon_crtc->base;
	unsigned long flags;
	int r;
	int vpos, hpos;

	down_read(&rdev->exclusive_lock);
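	/* If the flip fence belongs to this GPU, radeon_fence_wait() returns
	 * -EDEADLK when a lockup was detected; in that case drop
	 * exclusive_lock, reset the GPU (retrying while the reset itself
	 * reports -EAGAIN) and retake the lock before flipping. */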
	if (work->fence) {
		struct radeon_fence *fence;

		fence = to_radeon_fence(work->fence);
		if (fence && fence->rdev == rdev) {
			r = radeon_fence_wait(fence, false);
			if (r == -EDEADLK) {
				up_read(&rdev->exclusive_lock);
				do {
					r = radeon_gpu_reset(rdev);
				} while (r == -EAGAIN);
				down_read(&rdev->exclusive_lock);
			}
		} else
			r = dma_fence_wait(work->fence, false);

		if (r)
			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);

		/* We continue with the page flip even if we failed to wait on
		 * the fence, otherwise the DRM core and userspace will be
		 * confused about which BO the CRTC is scanning out
		 */

		dma_fence_put(work->fence);
		work->fence = NULL;
	}

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip. Always wait on pre DCE4 to avoid races with
	 * flip completion handling from vblank irq, as these old asics don't
	 * have reliable pageflip completion interrupts.
	 */
	while (radeon_crtc->enabled &&
	       (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0,
					   &vpos, &hpos, NULL, NULL,
					   &crtc->hwmode)
		& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	       (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	       (!ASIC_IS_AVIVO(rdev) ||
		((int) (work->target_vblank -
			dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)))
		usleep_range(1000, 2000);

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* set the proper interrupt */
	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);

	/* do the flip (mmio) */
	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async);

	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	up_read(&rdev->exclusive_lock);
}

static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
					struct drm_framebuffer *fb,
					struct drm_pending_vblank_event *event,
					uint32_t page_flip_flags,
					uint32_t target,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_framebuffer *old_radeon_fb;
	struct radeon_framebuffer *new_radeon_fb;
	struct drm_gem_object *obj;
	struct radeon_flip_work *work;
	struct radeon_bo *new_rbo;
	uint32_t tiling_flags, pitch_pixels;
	uint64_t base;
	unsigned long flags;
	int r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_WORK(&work->flip_work, radeon_flip_work_func);
	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);

	work->rdev = rdev;
	work->crtc_id = radeon_crtc->crtc_id;
	work->event = event;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
	obj = old_radeon_fb->obj;

	/* take a reference to the old object */
	drm_gem_object_get(obj);
	work->old_rbo = gem_to_radeon_bo(obj);

	new_radeon_fb = to_radeon_framebuffer(fb);
	obj = new_radeon_fb->obj;
	new_rbo = gem_to_radeon_bo(obj);

	/* pin the new buffer */
	DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
			 work->old_rbo, new_rbo);

	r = radeon_bo_reserve(new_rbo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
		goto cleanup;
	}
	/* Only 27 bit offset for legacy CRTC */
	r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
	if (unlikely(r != 0)) {
		radeon_bo_unreserve(new_rbo);
		DRM_ERROR("failed to pin new rbo buffer before flip\n");
		goto cleanup;
	}
	work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
	radeon_bo_unreserve(new_rbo);

	if (!ASIC_IS_AVIVO(rdev)) {
		/* crtc offset is from display base addr not FB location */
		base -= radeon_crtc->legacy_display_base_addr;
		pitch_pixels = fb->pitches[0] / fb->format->cpp[0];

		if (tiling_flags & RADEON_TILING_MACRO) {
			if (ASIC_IS_R300(rdev)) {
				base &= ~0x7ff;
			} else {
				int byteshift = fb->format->cpp[0] * 8 >> 4;
				int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
			}
		} else {
			int offset = crtc->y * pitch_pixels + crtc->x;
			switch (fb->format->cpp[0] * 8) {
			case 8:
			default:
				offset *= 1;
				break;
			case 15:
			case 16:
				offset *= 2;
				break;
			case 24:
			case 32:
				offset *= 4;
				break;
			}
			base += offset;
		}
		base &= ~7;
	}
	work->base = base;

	r = drm_crtc_vblank_get(crtc);
	if (r) {
		DRM_ERROR("failed to get vblank before flip\n");
		goto pflip_cleanup;
	}
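
	/* "target" is an absolute flip target expressed in DRM's software
	 * vblank count; rebase it into the hardware counter domain so the
	 * flip worker can compare it directly against get_vblank_counter(). */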
	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
		dev->driver->get_vblank_counter(dev, work->crtc_id);

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto vblank_cleanup;
	}
	radeon_crtc->flip_status = RADEON_FLIP_PENDING;
	radeon_crtc->flip_work = work;

	crtc->primary->fb = fb;

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	queue_work(radeon_crtc->flip_queue, &work->flip_work);
	return 0;

vblank_cleanup:
	drm_crtc_vblank_put(crtc);

pflip_cleanup:
	if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
		DRM_ERROR("failed to reserve new rbo in error path\n");
		goto cleanup;
	}
	if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
		DRM_ERROR("failed to unpin new rbo in error path\n");
	}
	radeon_bo_unreserve(new_rbo);

cleanup:
	drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
	dma_fence_put(work->fence);
	kfree(work);
	return r;
}

static int
radeon_crtc_set_config(struct drm_mode_set *set,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	rdev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	   take the current one */
	if (active && !rdev->have_disp_power_ref) {
		rdev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	   we got before */
	if (!active && rdev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		rdev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const struct drm_crtc_funcs radeon_crtc_funcs = {
	.cursor_set2 = radeon_crtc_cursor_set2,
	.cursor_move = radeon_crtc_cursor_move,
	.gamma_set = radeon_crtc_gamma_set,
	.set_config = radeon_crtc_set_config,
	.destroy = radeon_crtc_destroy,
	.page_flip_target = radeon_crtc_page_flip_target,
};

static void radeon_crtc_init(struct drm_device *dev, int index)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc;
	int i;

	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (radeon_crtc == NULL)
		return;

	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
	radeon_crtc->crtc_id = index;
	radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
	rdev->mode_info.crtcs[index] = radeon_crtc;

	if (rdev->family >= CHIP_BONAIRE) {
		radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
		radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
	} else {
		radeon_crtc->max_cursor_width = CURSOR_WIDTH;
		radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
	}
	dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
	dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;

	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
	radeon_crtc->mode_set.num_connectors = 0;
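
	/* initialise the legacy LUT tables with an identity ramp, scaling the
	 * 8-bit index up to the 10-bit range used by the hardware LUT */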
	for (i = 0; i < 256; i++) {
		radeon_crtc->lut_r[i] = i << 2;
		radeon_crtc->lut_g[i] = i << 2;
		radeon_crtc->lut_b[i] = i << 2;
	}

	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
		radeon_atombios_init_crtc(dev, radeon_crtc);
	else
		radeon_legacy_init_crtc(dev, radeon_crtc);
}

static const char *encoder_names[38] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
};

static const char *hpd_names[6] = {
	"hpd1",
	"hpd2",
	"hpd3",
	"hpd4",
	"hpd5",
	"hpd6",
};

static void radeon_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("Radeon Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		radeon_connector = to_radeon_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
		if (radeon_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 radeon_connector->ddc_bus->rec.mask_clk_reg,
				 radeon_connector->ddc_bus->rec.mask_data_reg,
				 radeon_connector->ddc_bus->rec.a_clk_reg,
				 radeon_connector->ddc_bus->rec.a_data_reg,
				 radeon_connector->ddc_bus->rec.en_clk_reg,
				 radeon_connector->ddc_bus->rec.en_data_reg,
				 radeon_connector->ddc_bus->rec.y_clk_reg,
				 radeon_connector->ddc_bus->rec.y_data_reg);
			if (radeon_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 radeon_connector->router.ddc_mux_control_pin,
					 radeon_connector->router.ddc_mux_state);
			if (radeon_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 radeon_connector->router.cd_mux_control_pin,
					 radeon_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			radeon_encoder = to_radeon_encoder(encoder);
			devices = radeon_encoder->devices & radeon_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
			}
		}
		i++;
	}
}

static bool radeon_setup_enc_conn(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	bool ret = false;

	if (rdev->bios) {
		if (rdev->is_atom_bios) {
			ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
			if (ret == false)
				ret = radeon_get_atom_connector_info_from_object_table(dev);
		} else {
			ret = radeon_get_legacy_connector_info_from_bios(dev);
			if (ret == false)
				ret = radeon_get_legacy_connector_info_from_table(dev);
		}
	} else {
		if (!ASIC_IS_AVIVO(rdev))
			ret = radeon_get_legacy_connector_info_from_table(dev);
	}
	if (ret) {
		radeon_setup_encoder_clones(dev);
		radeon_print_display_setup(dev);
	}

	return ret;
}

/**
 * avivo_reduce_ratio - fractional number reduction
 *
 * @nom: nominator
 * @den: denominator
 * @nom_min: minimum value for nominator
 * @den_min: minimum value for denominator
 *
 * Find the greatest common divisor and apply it on both nominator and
 * denominator, but make nominator and denominator are at least as large
 * as their minimum values.
 */
static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
			       unsigned nom_min, unsigned den_min)
{
	unsigned tmp;
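
	/* Example: *nom/*den = 108000/27000 reduces via the gcd to 4/1; with
	 * den_min == 2 both are then scaled by DIV_ROUND_UP(2, 1) == 2,
	 * giving 8/2. */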
	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure nominator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}

/**
 * avivo_get_fb_ref_div - feedback and ref divider calculation
 *
 * @nom: nominator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate feedback and reference divider for a given post divider. Makes
 * sure we stay within the limits.
 */
static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				 unsigned fb_div_max, unsigned ref_div_max,
				 unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}

/**
 * radeon_compute_pll_avivo - compute PLL paramaters
 *
 * @pll: information about the PLL
 * @dot_clock_p: resulting pixel clock
 * fb_div_p: resulting feedback divider
 * frac_fb_div_p: fractional part of the feedback divider
 * ref_div_p: resulting reference divider
 * post_div_p: resulting reference divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void radeon_compute_pll_avivo(struct radeon_pll *pll,
			      u32 freq,
			      u32 *dot_clock_p,
			      u32 *fb_div_p,
			      u32 *frac_fb_div_p,
			      u32 *ref_div_p,
			      u32 *post_div_p)
{
	unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & RADEON_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & RADEON_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
		/* fix for problems on RS880 */
		ref_div_max = min(pll->max_ref_div, 7u);
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & RADEON_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & RADEON_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}
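
	/* dot_clock = (ref_freq * fb_div) / (ref_div * post_div), so the
	 * ratio searched below is
	 * target_clock / ref_freq = fb_div / (ref_div * post_div). */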
	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;

		avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
				     ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
			     &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}

static inline uint32_t radeon_div(uint64_t n, uint32_t d)
{
	uint64_t mod;

	n += d / 2;

	mod = do_div(n, d);
	return n;
}

void radeon_compute_pll_legacy(struct radeon_pll *pll,
			       u64 freq,
			       uint32_t *dot_clock_p,
			       uint32_t *fb_div_p,
			       uint32_t *frac_fb_div_p,
			       uint32_t *ref_div_p,
			       uint32_t *post_div_p)
{
	uint32_t min_ref_div = pll->min_ref_div;
	uint32_t max_ref_div = pll->max_ref_div;
	uint32_t min_post_div = pll->min_post_div;
	uint32_t max_post_div = pll->max_post_div;
	uint32_t min_fractional_feed_div = 0;
	uint32_t max_fractional_feed_div = 0;
	uint32_t best_vco = pll->best_vco;
	uint32_t best_post_div = 1;
	uint32_t best_ref_div = 1;
	uint32_t best_feedback_div = 1;
	uint32_t best_frac_feedback_div = 0;
	uint32_t best_freq = -1;
	uint32_t best_error = 0xffffffff;
	uint32_t best_vco_diff = 1;
	uint32_t post_div;
	u32 pll_out_min, pll_out_max;

	DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
	freq = freq * 1000;

	if (pll->flags & RADEON_PLL_IS_LCD) {
		pll_out_min = pll->lcd_pll_out_min;
		pll_out_max = pll->lcd_pll_out_max;
	} else {
		pll_out_min = pll->pll_out_min;
		pll_out_max = pll->pll_out_max;
	}

	if (pll_out_min > 64800)
		pll_out_min = 64800;

	if (pll->flags & RADEON_PLL_USE_REF_DIV)
		min_ref_div = max_ref_div = pll->reference_div;
	else {
		while (min_ref_div < max_ref_div-1) {
			uint32_t mid = (min_ref_div + max_ref_div) / 2;
			uint32_t pll_in = pll->reference_freq / mid;
			if (pll_in < pll->pll_in_min)
				max_ref_div = mid;
			else if (pll_in > pll->pll_in_max)
				min_ref_div = mid;
			else
				break;
		}
	}

	if (pll->flags & RADEON_PLL_USE_POST_DIV)
		min_post_div = max_post_div = pll->post_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		min_fractional_feed_div = pll->min_frac_feedback_div;
		max_fractional_feed_div = pll->max_frac_feedback_div;
	}

	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
		uint32_t ref_div;

		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
			continue;

		/* legacy radeons only have a few post_divs */
		if (pll->flags & RADEON_PLL_LEGACY) {
			if ((post_div == 5) ||
			    (post_div == 7) ||
			    (post_div == 9) ||
			    (post_div == 10) ||
			    (post_div == 11) ||
			    (post_div == 13) ||
			    (post_div == 14) ||
			    (post_div == 15))
				continue;
		}

		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
			uint32_t feedback_div, current_freq = 0, error, vco_diff;
			uint32_t pll_in = pll->reference_freq / ref_div;
			uint32_t min_feed_div = pll->min_feedback_div;
			uint32_t max_feed_div = pll->max_feedback_div + 1;

			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
				continue;

			while (min_feed_div < max_feed_div) {
				uint32_t vco;
				uint32_t min_frac_feed_div = min_fractional_feed_div;
				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
				uint32_t frac_feedback_div;
				uint64_t tmp;

				feedback_div = (min_feed_div + max_feed_div) / 2;

				tmp = (uint64_t)pll->reference_freq * feedback_div;
				vco = radeon_div(tmp, ref_div);

				if (vco < pll_out_min) {
					min_feed_div = feedback_div + 1;
					continue;
				} else if (vco > pll_out_max) {
					max_feed_div = feedback_div;
					continue;
				}

				while (min_frac_feed_div < max_frac_feed_div) {
					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
					current_freq = radeon_div(tmp, ref_div * post_div);

					if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
						if (freq < current_freq)
							error = 0xffffffff;
						else
							error = freq - current_freq;
					} else
						error = abs(current_freq - freq);
					vco_diff = abs(vco - best_vco);

					if ((best_vco == 0 && error < best_error) ||
					    (best_vco != 0 &&
					     ((best_error > 100 && error < best_error - 100) ||
					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
						best_post_div = post_div;
						best_ref_div = ref_div;
						best_feedback_div = feedback_div;
						best_frac_feedback_div = frac_feedback_div;
						best_freq = current_freq;
						best_error = error;
						best_vco_diff = vco_diff;
					} else if (current_freq == freq) {
						if (best_freq == -1) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						}
					}
					if (current_freq < freq)
						min_frac_feed_div = frac_feedback_div + 1;
					else
						max_frac_feed_div = frac_feedback_div;
				}
				if (current_freq < freq)
					min_feed_div = feedback_div + 1;
				else
					max_feed_div = feedback_div;
			}
		}
	}

	*dot_clock_p = best_freq / 10000;
	*fb_div_p = best_feedback_div;
	*frac_fb_div_p = best_frac_feedback_div;
	*ref_div_p = best_ref_div;
	*post_div_p = best_post_div;
	DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq,
		      best_freq / 1000, best_feedback_div, best_frac_feedback_div,
		      best_ref_div, best_post_div);
}

static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);

	drm_gem_object_put_unlocked(radeon_fb->obj);
	drm_framebuffer_cleanup(fb);
	kfree(radeon_fb);
}

static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);

	return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
}

static const struct drm_framebuffer_funcs radeon_fb_funcs = {
	.destroy = radeon_user_framebuffer_destroy,
	.create_handle = radeon_user_framebuffer_create_handle,
};

int
radeon_framebuffer_init(struct drm_device *dev,
			struct radeon_framebuffer *rfb,
			const struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *obj)
{
	int ret;

	rfb->obj = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
	if (ret) {
		rfb->obj = NULL;
		return ret;
	}
	return 0;
}

static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct radeon_framebuffer *radeon_fb;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
	if (obj->import_attach) {
		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
		return ERR_PTR(-EINVAL);
	}

	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
	if (radeon_fb == NULL) {
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
	if (ret) {
		kfree(radeon_fb);
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &radeon_fb->base;
}

static const struct drm_mode_config_funcs radeon_mode_funcs = {
	.fb_create = radeon_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static const struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{	{ 0, "driver" },
	{ 1, "bios" },
};

static const struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{	{ TV_STD_NTSC, "ntsc" },
	{ TV_STD_PAL, "pal" },
	{ TV_STD_PAL_M, "pal-m" },
	{ TV_STD_PAL_60, "pal-60" },
	{ TV_STD_NTSC_J, "ntsc-j" },
	{ TV_STD_SCART_PAL, "scart-pal" },
	{ TV_STD_PAL_CN, "pal-cn" },
	{ TV_STD_SECAM, "secam" },
};

static const struct drm_prop_enum_list radeon_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list radeon_audio_enum_list[] =
{	{ RADEON_AUDIO_DISABLE, "off" },
	{ RADEON_AUDIO_ENABLE, "on" },
	{ RADEON_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list radeon_dither_enum_list[] =
{	{ RADEON_FMT_DITHER_DISABLE, "off" },
	{ RADEON_FMT_DITHER_ENABLE, "on" },
};

static const struct drm_prop_enum_list radeon_output_csc_enum_list[] =
{	{ RADEON_OUTPUT_CSC_BYPASS, "bypass" },
	{ RADEON_OUTPUT_CSC_TVRGB, "tvrgb" },
	{ RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" },
	{ RADEON_OUTPUT_CSC_YCBCR709, "ycbcr709" },
};

static int radeon_modeset_create_props(struct radeon_device *rdev)
{
	int sz;

	if (rdev->is_atom_bios) {
		rdev->mode_info.coherent_mode_property =
			drm_property_create_range(rdev->ddev, 0 , "coherent", 0, 1);
		if (!rdev->mode_info.coherent_mode_property)
			return -ENOMEM;
	}

	if (!ASIC_IS_AVIVO(rdev)) {
		sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
		rdev->mode_info.tmds_pll_property =
			drm_property_create_enum(rdev->ddev, 0,
						 "tmds_pll",
						 radeon_tmds_pll_enum_list, sz);
	}

	rdev->mode_info.load_detect_property =
		drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
	if (!rdev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(rdev->ddev);

	sz = ARRAY_SIZE(radeon_tv_std_enum_list);
	rdev->mode_info.tv_std_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "tv standard",
					 radeon_tv_std_enum_list, sz);

	sz = ARRAY_SIZE(radeon_underscan_enum_list);
	rdev->mode_info.underscan_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "underscan",
					 radeon_underscan_enum_list, sz);

	rdev->mode_info.underscan_hborder_property =
		drm_property_create_range(rdev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!rdev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	rdev->mode_info.underscan_vborder_property =
		drm_property_create_range(rdev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!rdev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(radeon_audio_enum_list);
	rdev->mode_info.audio_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "audio",
					 radeon_audio_enum_list, sz);

	sz = ARRAY_SIZE(radeon_dither_enum_list);
	rdev->mode_info.dither_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "dither",
					 radeon_dither_enum_list, sz);

	sz = ARRAY_SIZE(radeon_output_csc_enum_list);
	rdev->mode_info.output_csc_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "output_csc",
					 radeon_output_csc_enum_list, sz);

	return 0;
}

void radeon_update_display_priority(struct radeon_device *rdev)
{
	/* adjustment options for the display watermarks */
	if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
		/* set display priority to high for r3xx, rv515 chips
		 * this avoids flickering due to underflow to the
		 * display controllers during heavy acceleration.
		 * Don't force high on rs4xx igp chips as it seems to
		 * affect the sound card. See kernel bug 15982.
		 */
		if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
		    !(rdev->flags & RADEON_IS_IGP))
			rdev->disp_priority = 2;
		else
			rdev->disp_priority = 0;
	} else
		rdev->disp_priority = radeon_disp_priority;
}

/*
 * Allocate hdmi structs and determine register offsets
 */
static void radeon_afmt_init(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
		rdev->mode_info.afmt[i] = NULL;

	if (ASIC_IS_NODCE(rdev)) {
		/* nothing to do */
	} else if (ASIC_IS_DCE4(rdev)) {
		static uint32_t eg_offsets[] = {
			EVERGREEN_CRTC0_REGISTER_OFFSET,
			EVERGREEN_CRTC1_REGISTER_OFFSET,
			EVERGREEN_CRTC2_REGISTER_OFFSET,
			EVERGREEN_CRTC3_REGISTER_OFFSET,
			EVERGREEN_CRTC4_REGISTER_OFFSET,
			EVERGREEN_CRTC5_REGISTER_OFFSET,
			0x13830 - 0x7030,
		};
		int num_afmt;

		/* DCE8 has 7 audio blocks tied to DIG encoders */
		/* DCE6 has 6 audio blocks tied to DIG encoders */
		/* DCE4/5 has 6 audio blocks tied to DIG encoders */
		/* DCE4.1 has 2 audio blocks tied to DIG encoders */
		if (ASIC_IS_DCE8(rdev))
			num_afmt = 7;
		else if (ASIC_IS_DCE6(rdev))
			num_afmt = 6;
		else if (ASIC_IS_DCE5(rdev))
			num_afmt = 6;
		else if (ASIC_IS_DCE41(rdev))
			num_afmt = 2;
		else /* DCE4 */
			num_afmt = 6;

		BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
		for (i = 0; i < num_afmt; i++) {
			rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
			if (rdev->mode_info.afmt[i]) {
				rdev->mode_info.afmt[i]->offset = eg_offsets[i];
				rdev->mode_info.afmt[i]->id = i;
			}
		}
	} else if (ASIC_IS_DCE3(rdev)) {
		/* DCE3.x has 2 audio blocks tied to DIG encoders */
		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[0]) {
			rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
			rdev->mode_info.afmt[0]->id = 0;
		}
		rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[1]) {
			rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
			rdev->mode_info.afmt[1]->id = 1;
		}
	} else if (ASIC_IS_DCE2(rdev)) {
		/* DCE2 has at least 1 routable audio block */
		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[0]) {
			rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
			rdev->mode_info.afmt[0]->id = 0;
		}
		/* r6xx has 2 routable audio blocks */
		if (rdev->family >= CHIP_R600) {
			rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
			if (rdev->mode_info.afmt[1]) {
				rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
				rdev->mode_info.afmt[1]->id = 1;
			}
		}
	}
}

static void radeon_afmt_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
		kfree(rdev->mode_info.afmt[i]);
		rdev->mode_info.afmt[i] = NULL;
	}
}

int radeon_modeset_init(struct radeon_device *rdev)
{
	int i;
	int ret;

	drm_mode_config_init(rdev->ddev);
	rdev->mode_info.mode_config_initialized = true;

	rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
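
	/* immediate (async) page flips are only advertised when pageflip
	 * completion irqs are used exclusively (radeon_use_pflipirq == 2)
	 * and the asic is R600 or newer */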
	if (radeon_use_pflipirq == 2 && rdev->family >= CHIP_R600)
		rdev->ddev->mode_config.async_page_flip = true;

	if (ASIC_IS_DCE5(rdev)) {
		rdev->ddev->mode_config.max_width = 16384;
		rdev->ddev->mode_config.max_height = 16384;
	} else if (ASIC_IS_AVIVO(rdev)) {
		rdev->ddev->mode_config.max_width = 8192;
		rdev->ddev->mode_config.max_height = 8192;
	} else {
		rdev->ddev->mode_config.max_width = 4096;
		rdev->ddev->mode_config.max_height = 4096;
	}

	rdev->ddev->mode_config.preferred_depth = 24;
	rdev->ddev->mode_config.prefer_shadow = 1;

	rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;

	ret = radeon_modeset_create_props(rdev);
	if (ret)
		return ret;

	/* init i2c buses */
	radeon_i2c_init(rdev);

	/* check combios for a valid hardcoded EDID - Sun servers */
	if (!rdev->is_atom_bios) {
		/* check for hardcoded EDID in BIOS */
		radeon_combios_check_hardcoded_edid(rdev);
	}

	/* allocate crtcs */
	for (i = 0; i < rdev->num_crtc; i++) {
		radeon_crtc_init(rdev->ddev, i);
	}

	/* okay we should have all the bios connectors */
	ret = radeon_setup_enc_conn(rdev->ddev);
	if (!ret)
		return ret;

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
	}

	/* initialize hpd */
	radeon_hpd_init(rdev);

	radeon_afmt_init(rdev);

	radeon_fbdev_init(rdev);
	drm_kms_helper_poll_init(rdev->ddev);

	/* do pm late init */
	ret = radeon_pm_late_init(rdev);

	return 0;
}

void radeon_modeset_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.mode_config_initialized) {
		drm_kms_helper_poll_fini(rdev->ddev);
		radeon_hpd_fini(rdev);
		drm_crtc_force_disable_all(rdev->ddev);
		radeon_fbdev_fini(rdev);
		radeon_afmt_fini(rdev);
		drm_mode_config_cleanup(rdev->ddev);
		rdev->mode_info.mode_config_initialized = false;
	}

	kfree(rdev->mode_info.bios_hardcoded_edid);

	/* free i2c buses */
	radeon_i2c_fini(rdev);
}

static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}

bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_encoder *radeon_encoder;
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	bool first = true;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	radeon_crtc->h_border = 0;
	radeon_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		radeon_encoder = to_radeon_encoder(encoder);
		connector = radeon_get_connector_for_encoder(encoder);
		radeon_connector = to_radeon_connector(connector);

		if (first) {
			/* set scaling */
			if (radeon_encoder->rmx_type == RMX_OFF)
				radeon_crtc->rmx_type = RMX_OFF;
			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
			else
				radeon_crtc->rmx_type = RMX_OFF;
			/* copy native mode */
			memcpy(&radeon_crtc->native_mode,
			       &radeon_encoder->native_mode,
			       sizeof(struct drm_display_mode));
			src_v = crtc->mode.vdisplay;
			dst_v = radeon_crtc->native_mode.vdisplay;
			src_h = crtc->mode.hdisplay;
			dst_h = radeon_crtc->native_mode.hdisplay;

			/* fix up for overscan on hdmi */
			if (ASIC_IS_AVIVO(rdev) &&
			    (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
			    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
			     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
			      drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
			      is_hdtv_mode(mode)))) {
				if (radeon_encoder->underscan_hborder != 0)
					radeon_crtc->h_border = radeon_encoder->underscan_hborder;
				else
					radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
				if (radeon_encoder->underscan_vborder != 0)
					radeon_crtc->v_border = radeon_encoder->underscan_vborder;
				else
					radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
				radeon_crtc->rmx_type = RMX_FULL;
				src_v = crtc->mode.vdisplay;
				dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
				src_h = crtc->mode.hdisplay;
				dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
			}
			first = false;
		} else {
			if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
				/* WARNING: Right now this can't happen but
				 * in the future we need to check that scaling
				 * are consistent across different encoder
				 * (ie all encoder can work with the same
				 *  scaling).
				 */
				DRM_ERROR("Scaling not consistent across encoder.\n");
				return false;
			}
		}
	}
	if (radeon_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;
		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		radeon_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		radeon_crtc->hsc.full = dfixed_div(a, b);
	} else {
		radeon_crtc->vsc.full = dfixed_const(1);
		radeon_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when query happened.
 *
 * \param dev Device to query.
 * \param crtc Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 *              For driver internal use only also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_INVBL = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 */
int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
			       unsigned int flags, int *vpos, int *hpos,
			       ktime_t *stime, ktime_t *etime,
			       const struct drm_display_mode *mode)
{
	u32 stat_crtc = 0, vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct radeon_device *rdev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (ASIC_IS_DCE4(rdev)) {
		if (pipe == 0) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC0_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC0_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 1) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC1_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC1_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 2) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC2_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC2_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 3) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC3_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC3_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 4) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC4_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC4_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 5) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC5_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC5_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else if (ASIC_IS_AVIVO(rdev)) {
		if (pipe == 0) {
			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 1) {
			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else {
		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
		if (pipe == 0) {
			/* Assume vbl_end == 0, get vbl_start from
			 * upper 16 bits.
			 */
			vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			/* Only retrieve vpos from upper 16 bits, set hpos == 0. */
			position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (pipe == 1) {
			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;