/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include <asm/div64.h>

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
static void amdgpu_display_flip_callback(struct dma_fence *f,
					 struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	schedule_work(&work->flip_work.work);
}
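/*
 * Attach amdgpu_display_flip_callback() to *f so the flip work is rescheduled
 * once the fence signals.  Returns true while the flip still has to wait for
 * the fence, false if the fence is absent or already signaled and can simply
 * be dropped.
 */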
static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
					     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb,
				    amdgpu_display_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}
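/*
 * Deferred flip handler: wait for all fences attached to the new buffer and
 * for the target vblank, then program the flip through the display IP's
 * page_flip() hook while holding the event spinlock.
 */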
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned int i;
	int vpos, hpos;

	if (amdgpu_display_flip_handle_fence(work, &work->excl))
		return;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}
/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, true);
	if (likely(r == 0)) {
		r = amdgpu_bo_unpin(work->old_abo);
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}
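/*
 * page_flip_target implementation: pin the new framebuffer's buffer object,
 * collect its reservation fences and queue amdgpu_flip_work so the mmio flip
 * is performed once the fences have signaled and the target vblank count has
 * been reached.
 */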
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 struct drm_pending_vblank_event *event,
					 uint32_t page_flip_flags, uint32_t target,
					 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to pin new abo buffer before flip\n");
		goto unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
					      &work->shared_count,
					      &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
		DRM_ERROR("failed to unpin new abo in error path\n");
	}
unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	dma_fence_put(work->excl);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}
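/*
 * Wrapper around drm_crtc_helper_set_config() that also manages the runtime
 * PM reference: keep one display power reference while any CRTC is enabled
 * and drop it again once all CRTCs are off.
 */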
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	 * we got before
	 */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
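/* Human readable names for the ATOM encoder ids and HPD pins, used by
 * amdgpu_display_print_display_setup() below.
 */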
static const char *encoder_names[41] = {
	[19] = "INTERNAL_KLDSCP_TMDS1",
	[20] = "INTERNAL_KLDSCP_DVO1",
	[21] = "INTERNAL_KLDSCP_DAC1",
	[22] = "INTERNAL_KLDSCP_DAC2",
	[31] = "INTERNAL_KLDSCP_LVTMA",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("AMDGPU Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);

			devices = amdgpu_encoder->devices & amdgpu_connector->devices;

			if (devices & ATOM_DEVICE_CRT1_SUPPORT)
				DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			if (devices & ATOM_DEVICE_CRT2_SUPPORT)
				DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			if (devices & ATOM_DEVICE_LCD1_SUPPORT)
				DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			if (devices & ATOM_DEVICE_DFP1_SUPPORT)
				DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			if (devices & ATOM_DEVICE_DFP2_SUPPORT)
				DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			if (devices & ATOM_DEVICE_DFP3_SUPPORT)
				DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			if (devices & ATOM_DEVICE_DFP4_SUPPORT)
				DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			if (devices & ATOM_DEVICE_DFP5_SUPPORT)
				DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			if (devices & ATOM_DEVICE_DFP6_SUPPORT)
				DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			if (devices & ATOM_DEVICE_TV1_SUPPORT)
				DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			if (devices & ATOM_DEVICE_CV_SUPPORT)
				DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
		}
		i++;
	}
}
/**
 * amdgpu_display_ddc_probe
 *
 * Sanity check the connector's DDC bus by reading the start of the EDID,
 * either over the DP AUX channel or the plain i2c adapter.
 */
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux)
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	else
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;

	/* Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_block_valid() can fix the last 2 bytes */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this
		 * connector */
		return false;
	}
	return true;
}
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};
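/*
 * Domains a buffer must be pinned to for display scanout: always VRAM, plus
 * GTT for APUs between Carrizo and Raven when DC is enabled, since those
 * parts can also scan out from system memory.
 */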
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type < CHIP_RAVEN &&
	    adev->flags & AMD_IS_APU &&
	    amdgpu_device_asic_has_dc_support(adev->asic_type))
		domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif

	return domain;
}
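/*
 * Fill an amdgpu_framebuffer from the userspace mode_fb_cmd and its backing
 * GEM object and register it with the DRM core; on failure the object
 * pointer is cleared again so the caller can drop its reference.
 */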
int amdgpu_display_framebuffer_init(struct drm_device *dev,
				    struct amdgpu_framebuffer *rfb,
				    const struct drm_mode_fb_cmd2 *mode_cmd,
				    struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret) {
		rfb->base.obj[0] = NULL;
		return ret;
	}
	return 0;
}
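/*
 * .fb_create implementation: look up the GEM object behind the first handle,
 * reject imported dma-bufs (they cannot be migrated to VRAM for scanout) and
 * wrap the object in a new amdgpu_framebuffer.
 */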
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
				       struct drm_file *file_priv,
				       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct amdgpu_framebuffer *amdgpu_fb;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
	if (obj->import_attach) {
		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
		return ERR_PTR(-EINVAL);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &amdgpu_fb->base;
}
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev->ddev, 0, "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev->ddev);

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev->ddev, 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev->ddev, 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev->ddev, 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	if (amdgpu_device_has_dc_support(adev)) {
		adev->mode_info.max_bpc_property =
			drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
		if (!adev->mode_info.max_bpc_property)
			return -ENOMEM;
	}

	return 0;
}
void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}
static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}
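/*
 * CRTC mode fixup: pick the RMX scaling type and optional underscan borders
 * for the encoder driving this CRTC and precompute the resulting horizontal
 * and vertical scaling ratios as 20.12 fixed point values.
 */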
bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					    const struct drm_display_mode *mode,
					    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);
		amdgpu_connector = to_amdgpu_connector(connector);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}
/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when the query happened.
 *
 * \param dev Device to query.
 * \param pipe Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 *              For driver internal use only also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that the returned position may be offset by a constant
 * but unknown small number of scanlines wrt. the real scanout position.
 */
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
			unsigned int pipe, unsigned int flags, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}
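/* Map a CRTC index to the vblank IRQ source used by the interrupt code. */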
int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}