/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB
);
98 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU
);
101 /* Number of bytes in PSP header for firmware. */
102 #define PSP_HEADER_BYTES 0x100
104 /* Number of bytes in PSP footer for firmware. */
105 #define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
117 /* basic init/fini API */
118 static int amdgpu_dm_init(struct amdgpu_device
*adev
);
119 static void amdgpu_dm_fini(struct amdgpu_device
*adev
);
122 * initializes drm_device display related structures, based on the information
123 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
124 * drm_encoder, drm_mode_config
126 * Returns 0 on success
128 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
);
129 /* removes and deallocates the drm structures, created by the above function */
130 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
);
133 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
);
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
136 struct drm_plane
*plane
,
137 unsigned long possible_crtcs
,
138 const struct dc_plane_cap
*plane_cap
);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
140 struct drm_plane
*plane
,
141 uint32_t link_index
);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
143 struct amdgpu_dm_connector
*amdgpu_dm_connector
,
145 struct amdgpu_encoder
*amdgpu_encoder
);
146 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
147 struct amdgpu_encoder
*aencoder
,
148 uint32_t link_index
);
150 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
);
152 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
153 struct drm_atomic_state
*state
,
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
);
158 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
159 struct drm_atomic_state
*state
);
161 static void handle_cursor_update(struct drm_plane
*plane
,
162 struct drm_plane_state
*old_plane_state
);
164 static void amdgpu_dm_set_psr_caps(struct dc_link
*link
);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state
*stream
);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state
*stream
);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state
*stream
);
171 * dm_vblank_get_counter
174 * Get counter for number of vertical blanks
177 * struct amdgpu_device *adev - [in] desired amdgpu device
178 * int disp_idx - [in] which CRTC to get the counter from
181 * Counter for vertical blanks
183 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
185 if (crtc
>= adev
->mode_info
.num_crtc
)
188 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
189 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
193 if (acrtc_state
->stream
== NULL
) {
194 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
199 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
204 u32
*vbl
, u32
*position
)
206 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
208 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
211 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
212 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
215 if (acrtc_state
->stream
== NULL
) {
216 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
222 * TODO rework base driver to use values directly.
223 * for now parse it back into reg-format
225 dc_stream_get_scanoutpos(acrtc_state
->stream
,
231 *position
= v_position
| (h_position
<< 16);
232 *vbl
= v_blank_start
| (v_blank_end
<< 16);
238 static bool dm_is_idle(void *handle
)
/* amd_ip_funcs hook: nothing to wait for; always succeeds. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
250 static bool dm_check_soft_reset(void *handle
)
/* amd_ip_funcs hook: soft reset is a no-op for DM. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
261 static struct amdgpu_crtc
*
262 get_crtc_by_otg_inst(struct amdgpu_device
*adev
,
265 struct drm_device
*dev
= adev
->ddev
;
266 struct drm_crtc
*crtc
;
267 struct amdgpu_crtc
*amdgpu_crtc
;
269 if (otg_inst
== -1) {
271 return adev
->mode_info
.crtcs
[0];
274 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
275 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
277 if (amdgpu_crtc
->otg_inst
== otg_inst
)
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state
*dm_state
)
286 return dm_state
->freesync_config
.state
== VRR_STATE_ACTIVE_VARIABLE
||
287 dm_state
->freesync_config
.state
== VRR_STATE_ACTIVE_FIXED
;
291 * dm_pflip_high_irq() - Handle pageflip interrupt
292 * @interrupt_params: ignored
294 * Handles the pageflip interrupt by notifying all interested parties
295 * that the pageflip has been completed.
297 static void dm_pflip_high_irq(void *interrupt_params
)
299 struct amdgpu_crtc
*amdgpu_crtc
;
300 struct common_irq_params
*irq_params
= interrupt_params
;
301 struct amdgpu_device
*adev
= irq_params
->adev
;
303 struct drm_pending_vblank_event
*e
;
304 struct dm_crtc_state
*acrtc_state
;
305 uint32_t vpos
, hpos
, v_blank_start
, v_blank_end
;
308 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
310 /* IRQ could occur when in initial stage */
311 /* TODO work and BO cleanup */
312 if (amdgpu_crtc
== NULL
) {
313 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
317 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
319 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
320 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
321 amdgpu_crtc
->pflip_status
,
322 AMDGPU_FLIP_SUBMITTED
,
323 amdgpu_crtc
->crtc_id
,
325 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
329 /* page flip completed. */
330 e
= amdgpu_crtc
->event
;
331 amdgpu_crtc
->event
= NULL
;
336 acrtc_state
= to_dm_crtc_state(amdgpu_crtc
->base
.state
);
337 vrr_active
= amdgpu_dm_vrr_active(acrtc_state
);
339 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
341 !dc_stream_get_scanoutpos(acrtc_state
->stream
, &v_blank_start
,
342 &v_blank_end
, &hpos
, &vpos
) ||
343 (vpos
< v_blank_start
)) {
344 /* Update to correct count and vblank timestamp if racing with
345 * vblank irq. This also updates to the correct vblank timestamp
346 * even in VRR mode, as scanout is past the front-porch atm.
348 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
350 /* Wake up userspace by sending the pageflip event with proper
351 * count and timestamp of vblank of flip completion.
354 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, e
);
356 /* Event sent, so done with vblank for this flip */
357 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
360 /* VRR active and inside front-porch: vblank count and
361 * timestamp for pageflip event will only be up to date after
362 * drm_crtc_handle_vblank() has been executed from late vblank
363 * irq handler after start of back-porch (vline 0). We queue the
364 * pageflip event for send-out by drm_crtc_handle_vblank() with
365 * updated timestamp and count, once it runs after us.
367 * We need to open-code this instead of using the helper
368 * drm_crtc_arm_vblank_event(), as that helper would
369 * call drm_crtc_accurate_vblank_count(), which we must
370 * not call in VRR mode while we are in front-porch!
373 /* sequence will be replaced by real count during send-out. */
374 e
->sequence
= drm_crtc_vblank_count(&amdgpu_crtc
->base
);
375 e
->pipe
= amdgpu_crtc
->crtc_id
;
377 list_add_tail(&e
->base
.link
, &adev
->ddev
->vblank_event_list
);
381 /* Keep track of vblank of this flip for flip throttling. We use the
382 * cooked hw counter, as that one incremented at start of this vblank
383 * of pageflip completion, so last_flip_vblank is the forbidden count
384 * for queueing new pageflips if vsync + VRR is enabled.
386 amdgpu_crtc
->last_flip_vblank
= amdgpu_get_vblank_counter_kms(adev
->ddev
,
387 amdgpu_crtc
->crtc_id
);
389 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
390 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
392 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393 amdgpu_crtc
->crtc_id
, amdgpu_crtc
,
394 vrr_active
, (int) !e
);
397 static void dm_vupdate_high_irq(void *interrupt_params
)
399 struct common_irq_params
*irq_params
= interrupt_params
;
400 struct amdgpu_device
*adev
= irq_params
->adev
;
401 struct amdgpu_crtc
*acrtc
;
402 struct dm_crtc_state
*acrtc_state
;
405 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VUPDATE
);
408 acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
410 DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc
->crtc_id
,
411 amdgpu_dm_vrr_active(acrtc_state
));
413 /* Core vblank handling is done here after end of front-porch in
414 * vrr mode, as vblank timestamping will give valid results
415 * while now done after front-porch. This will also deliver
416 * page-flip completion events that have been queued to us
417 * if a pageflip happened inside front-porch.
419 if (amdgpu_dm_vrr_active(acrtc_state
)) {
420 drm_crtc_handle_vblank(&acrtc
->base
);
422 /* BTR processing for pre-DCE12 ASICs */
423 if (acrtc_state
->stream
&&
424 adev
->family
< AMDGPU_FAMILY_AI
) {
425 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
426 mod_freesync_handle_v_update(
427 adev
->dm
.freesync_module
,
429 &acrtc_state
->vrr_params
);
431 dc_stream_adjust_vmin_vmax(
434 &acrtc_state
->vrr_params
.adjust
);
435 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
442 * dm_crtc_high_irq() - Handles CRTC interrupt
443 * @interrupt_params: ignored
445 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
448 static void dm_crtc_high_irq(void *interrupt_params
)
450 struct common_irq_params
*irq_params
= interrupt_params
;
451 struct amdgpu_device
*adev
= irq_params
->adev
;
452 struct amdgpu_crtc
*acrtc
;
453 struct dm_crtc_state
*acrtc_state
;
456 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
459 acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
461 DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc
->crtc_id
,
462 amdgpu_dm_vrr_active(acrtc_state
));
464 /* Core vblank handling at start of front-porch is only possible
465 * in non-vrr mode, as only there vblank timestamping will give
466 * valid results while done in front-porch. Otherwise defer it
467 * to dm_vupdate_high_irq after end of front-porch.
469 if (!amdgpu_dm_vrr_active(acrtc_state
))
470 drm_crtc_handle_vblank(&acrtc
->base
);
472 /* Following stuff must happen at start of vblank, for crc
473 * computation and below-the-range btr support in vrr mode.
475 amdgpu_dm_crtc_handle_crc_irq(&acrtc
->base
);
477 if (acrtc_state
->stream
&& adev
->family
>= AMDGPU_FAMILY_AI
&&
478 acrtc_state
->vrr_params
.supported
&&
479 acrtc_state
->freesync_config
.state
== VRR_STATE_ACTIVE_VARIABLE
) {
480 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
481 mod_freesync_handle_v_update(
482 adev
->dm
.freesync_module
,
484 &acrtc_state
->vrr_params
);
486 dc_stream_adjust_vmin_vmax(
489 &acrtc_state
->vrr_params
.adjust
);
490 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and pick up vrr register updates.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				amdgpu_dm_vrr_active(acrtc_state));

	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
		adev->dm.freesync_module,
		acrtc_state->stream,
		&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
		adev->dm.dc,
		acrtc_state->stream,
		&acrtc_state->vrr_params.adjust);
	}

	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif
559 static int dm_set_clockgating_state(void *handle
,
560 enum amd_clockgating_state state
)
565 static int dm_set_powergating_state(void *handle
,
566 enum amd_powergating_state state
)
571 /* Prototypes of private functions */
572 static int dm_early_init(void* handle
);
574 /* Allocate memory for FBC compressed data */
575 static void amdgpu_dm_fbc_init(struct drm_connector
*connector
)
577 struct drm_device
*dev
= connector
->dev
;
578 struct amdgpu_device
*adev
= dev
->dev_private
;
579 struct dm_comressor_info
*compressor
= &adev
->dm
.compressor
;
580 struct amdgpu_dm_connector
*aconn
= to_amdgpu_dm_connector(connector
);
581 struct drm_display_mode
*mode
;
582 unsigned long max_size
= 0;
584 if (adev
->dm
.dc
->fbc_compressor
== NULL
)
587 if (aconn
->dc_link
->connector_signal
!= SIGNAL_TYPE_EDP
)
590 if (compressor
->bo_ptr
)
594 list_for_each_entry(mode
, &connector
->modes
, head
) {
595 if (max_size
< mode
->htotal
* mode
->vtotal
)
596 max_size
= mode
->htotal
* mode
->vtotal
;
600 int r
= amdgpu_bo_create_kernel(adev
, max_size
* 4, PAGE_SIZE
,
601 AMDGPU_GEM_DOMAIN_GTT
, &compressor
->bo_ptr
,
602 &compressor
->gpu_addr
, &compressor
->cpu_addr
);
605 DRM_ERROR("DM: Failed to initialize FBC\n");
607 adev
->dm
.dc
->ctx
->fbc_gpu_addr
= compressor
->gpu_addr
;
608 DRM_INFO("DM: FBC alloc %lu\n", max_size
*4);
615 static int amdgpu_dm_audio_component_get_eld(struct device
*kdev
, int port
,
616 int pipe
, bool *enabled
,
617 unsigned char *buf
, int max_bytes
)
619 struct drm_device
*dev
= dev_get_drvdata(kdev
);
620 struct amdgpu_device
*adev
= dev
->dev_private
;
621 struct drm_connector
*connector
;
622 struct drm_connector_list_iter conn_iter
;
623 struct amdgpu_dm_connector
*aconnector
;
628 mutex_lock(&adev
->dm
.audio_lock
);
630 drm_connector_list_iter_begin(dev
, &conn_iter
);
631 drm_for_each_connector_iter(connector
, &conn_iter
) {
632 aconnector
= to_amdgpu_dm_connector(connector
);
633 if (aconnector
->audio_inst
!= port
)
637 ret
= drm_eld_size(connector
->eld
);
638 memcpy(buf
, connector
->eld
, min(max_bytes
, ret
));
642 drm_connector_list_iter_end(&conn_iter
);
644 mutex_unlock(&adev
->dm
.audio_lock
);
646 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port
, ret
, *enabled
);
651 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops
= {
652 .get_eld
= amdgpu_dm_audio_component_get_eld
,
655 static int amdgpu_dm_audio_component_bind(struct device
*kdev
,
656 struct device
*hda_kdev
, void *data
)
658 struct drm_device
*dev
= dev_get_drvdata(kdev
);
659 struct amdgpu_device
*adev
= dev
->dev_private
;
660 struct drm_audio_component
*acomp
= data
;
662 acomp
->ops
= &amdgpu_dm_audio_component_ops
;
664 adev
->dm
.audio_component
= acomp
;
669 static void amdgpu_dm_audio_component_unbind(struct device
*kdev
,
670 struct device
*hda_kdev
, void *data
)
672 struct drm_device
*dev
= dev_get_drvdata(kdev
);
673 struct amdgpu_device
*adev
= dev
->dev_private
;
674 struct drm_audio_component
*acomp
= data
;
678 adev
->dm
.audio_component
= NULL
;
681 static const struct component_ops amdgpu_dm_audio_component_bind_ops
= {
682 .bind
= amdgpu_dm_audio_component_bind
,
683 .unbind
= amdgpu_dm_audio_component_unbind
,
686 static int amdgpu_dm_audio_init(struct amdgpu_device
*adev
)
693 adev
->mode_info
.audio
.enabled
= true;
695 adev
->mode_info
.audio
.num_pins
= adev
->dm
.dc
->res_pool
->audio_count
;
697 for (i
= 0; i
< adev
->mode_info
.audio
.num_pins
; i
++) {
698 adev
->mode_info
.audio
.pin
[i
].channels
= -1;
699 adev
->mode_info
.audio
.pin
[i
].rate
= -1;
700 adev
->mode_info
.audio
.pin
[i
].bits_per_sample
= -1;
701 adev
->mode_info
.audio
.pin
[i
].status_bits
= 0;
702 adev
->mode_info
.audio
.pin
[i
].category_code
= 0;
703 adev
->mode_info
.audio
.pin
[i
].connected
= false;
704 adev
->mode_info
.audio
.pin
[i
].id
=
705 adev
->dm
.dc
->res_pool
->audios
[i
]->inst
;
706 adev
->mode_info
.audio
.pin
[i
].offset
= 0;
709 ret
= component_add(adev
->dev
, &amdgpu_dm_audio_component_bind_ops
);
713 adev
->dm
.audio_registered
= true;
718 static void amdgpu_dm_audio_fini(struct amdgpu_device
*adev
)
723 if (!adev
->mode_info
.audio
.enabled
)
726 if (adev
->dm
.audio_registered
) {
727 component_del(adev
->dev
, &amdgpu_dm_audio_component_bind_ops
);
728 adev
->dm
.audio_registered
= false;
731 /* TODO: Disable audio? */
733 adev
->mode_info
.audio
.enabled
= false;
736 void amdgpu_dm_audio_eld_notify(struct amdgpu_device
*adev
, int pin
)
738 struct drm_audio_component
*acomp
= adev
->dm
.audio_component
;
740 if (acomp
&& acomp
->audio_ops
&& acomp
->audio_ops
->pin_eld_notify
) {
741 DRM_DEBUG_KMS("Notify ELD: %d\n", pin
);
743 acomp
->audio_ops
->pin_eld_notify(acomp
->audio_ops
->audio_ptr
,
748 static int dm_dmub_hw_init(struct amdgpu_device
*adev
)
750 const struct dmcub_firmware_header_v1_0
*hdr
;
751 struct dmub_srv
*dmub_srv
= adev
->dm
.dmub_srv
;
752 struct dmub_srv_fb_info
*fb_info
= adev
->dm
.dmub_fb_info
;
753 const struct firmware
*dmub_fw
= adev
->dm
.dmub_fw
;
754 struct dmcu
*dmcu
= adev
->dm
.dc
->res_pool
->dmcu
;
755 struct abm
*abm
= adev
->dm
.dc
->res_pool
->abm
;
756 struct dmub_srv_hw_params hw_params
;
757 enum dmub_status status
;
758 const unsigned char *fw_inst_const
, *fw_bss_data
;
759 uint32_t i
, fw_inst_const_size
, fw_bss_data_size
;
763 /* DMUB isn't supported on the ASIC. */
767 DRM_ERROR("No framebuffer info for DMUB service.\n");
772 /* Firmware required for DMUB support. */
773 DRM_ERROR("No firmware provided for DMUB.\n");
777 status
= dmub_srv_has_hw_support(dmub_srv
, &has_hw_support
);
778 if (status
!= DMUB_STATUS_OK
) {
779 DRM_ERROR("Error checking HW support for DMUB: %d\n", status
);
783 if (!has_hw_support
) {
784 DRM_INFO("DMUB unsupported on ASIC\n");
788 hdr
= (const struct dmcub_firmware_header_v1_0
*)dmub_fw
->data
;
790 fw_inst_const
= dmub_fw
->data
+
791 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
) +
794 fw_bss_data
= dmub_fw
->data
+
795 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
) +
796 le32_to_cpu(hdr
->inst_const_bytes
);
798 /* Copy firmware and bios info into FB memory. */
799 fw_inst_const_size
= le32_to_cpu(hdr
->inst_const_bytes
) -
800 PSP_HEADER_BYTES
- PSP_FOOTER_BYTES
;
802 fw_bss_data_size
= le32_to_cpu(hdr
->bss_data_bytes
);
804 memcpy(fb_info
->fb
[DMUB_WINDOW_0_INST_CONST
].cpu_addr
, fw_inst_const
,
806 memcpy(fb_info
->fb
[DMUB_WINDOW_2_BSS_DATA
].cpu_addr
, fw_bss_data
,
808 memcpy(fb_info
->fb
[DMUB_WINDOW_3_VBIOS
].cpu_addr
, adev
->bios
,
811 /* Reset regions that need to be reset. */
812 memset(fb_info
->fb
[DMUB_WINDOW_4_MAILBOX
].cpu_addr
, 0,
813 fb_info
->fb
[DMUB_WINDOW_4_MAILBOX
].size
);
815 memset(fb_info
->fb
[DMUB_WINDOW_5_TRACEBUFF
].cpu_addr
, 0,
816 fb_info
->fb
[DMUB_WINDOW_5_TRACEBUFF
].size
);
818 memset(fb_info
->fb
[DMUB_WINDOW_6_FW_STATE
].cpu_addr
, 0,
819 fb_info
->fb
[DMUB_WINDOW_6_FW_STATE
].size
);
821 /* Initialize hardware. */
822 memset(&hw_params
, 0, sizeof(hw_params
));
823 hw_params
.fb_base
= adev
->gmc
.fb_start
;
824 hw_params
.fb_offset
= adev
->gmc
.aper_base
;
827 hw_params
.psp_version
= dmcu
->psp_version
;
829 for (i
= 0; i
< fb_info
->num_fb
; ++i
)
830 hw_params
.fb
[i
] = &fb_info
->fb
[i
];
832 status
= dmub_srv_hw_init(dmub_srv
, &hw_params
);
833 if (status
!= DMUB_STATUS_OK
) {
834 DRM_ERROR("Error initializing DMUB HW: %d\n", status
);
838 /* Wait for firmware load to finish. */
839 status
= dmub_srv_wait_for_auto_load(dmub_srv
, 100000);
840 if (status
!= DMUB_STATUS_OK
)
841 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status
);
843 /* Init DMCU and ABM if available. */
845 dmcu
->funcs
->dmcu_init(dmcu
);
846 abm
->dmcu_is_running
= dmcu
->funcs
->is_dmcu_initialized(dmcu
);
849 adev
->dm
.dc
->ctx
->dmub_srv
= dc_dmub_srv_create(adev
->dm
.dc
, dmub_srv
);
850 if (!adev
->dm
.dc
->ctx
->dmub_srv
) {
851 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
855 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
856 adev
->dm
.dmcub_fw_version
);
861 static int amdgpu_dm_init(struct amdgpu_device
*adev
)
863 struct dc_init_data init_data
;
864 #ifdef CONFIG_DRM_AMD_DC_HDCP
865 struct dc_callback_init init_params
;
869 adev
->dm
.ddev
= adev
->ddev
;
870 adev
->dm
.adev
= adev
;
872 /* Zero all the fields */
873 memset(&init_data
, 0, sizeof(init_data
));
874 #ifdef CONFIG_DRM_AMD_DC_HDCP
875 memset(&init_params
, 0, sizeof(init_params
));
878 mutex_init(&adev
->dm
.dc_lock
);
879 mutex_init(&adev
->dm
.audio_lock
);
881 if(amdgpu_dm_irq_init(adev
)) {
882 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
886 init_data
.asic_id
.chip_family
= adev
->family
;
888 init_data
.asic_id
.pci_revision_id
= adev
->rev_id
;
889 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
891 init_data
.asic_id
.vram_width
= adev
->gmc
.vram_width
;
892 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
893 init_data
.asic_id
.atombios_base_address
=
894 adev
->mode_info
.atom_context
->bios
;
896 init_data
.driver
= adev
;
898 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
900 if (!adev
->dm
.cgs_device
) {
901 DRM_ERROR("amdgpu: failed to create cgs device.\n");
905 init_data
.cgs_device
= adev
->dm
.cgs_device
;
907 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
909 switch (adev
->asic_type
) {
914 init_data
.flags
.gpu_vm_support
= true;
920 if (amdgpu_dc_feature_mask
& DC_FBC_MASK
)
921 init_data
.flags
.fbc_support
= true;
923 if (amdgpu_dc_feature_mask
& DC_MULTI_MON_PP_MCLK_SWITCH_MASK
)
924 init_data
.flags
.multi_mon_pp_mclk_switch
= true;
926 if (amdgpu_dc_feature_mask
& DC_DISABLE_FRACTIONAL_PWM_MASK
)
927 init_data
.flags
.disable_fractional_pwm
= true;
929 init_data
.flags
.power_down_display_on_boot
= true;
931 init_data
.soc_bounding_box
= adev
->dm
.soc_bounding_box
;
933 /* Display Core create. */
934 adev
->dm
.dc
= dc_create(&init_data
);
937 DRM_INFO("Display Core initialized with v%s!\n", DC_VER
);
939 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER
);
943 dc_hardware_init(adev
->dm
.dc
);
945 r
= dm_dmub_hw_init(adev
);
947 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r
);
951 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
952 if (!adev
->dm
.freesync_module
) {
954 "amdgpu: failed to initialize freesync_module.\n");
956 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
957 adev
->dm
.freesync_module
);
959 amdgpu_dm_init_color_mod();
961 #ifdef CONFIG_DRM_AMD_DC_HDCP
962 if (adev
->asic_type
>= CHIP_RAVEN
) {
963 adev
->dm
.hdcp_workqueue
= hdcp_create_workqueue(&adev
->psp
, &init_params
.cp_psp
, adev
->dm
.dc
);
965 if (!adev
->dm
.hdcp_workqueue
)
966 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
968 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev
->dm
.hdcp_workqueue
);
970 dc_init_callbacks(adev
->dm
.dc
, &init_params
);
973 if (amdgpu_dm_initialize_drm_device(adev
)) {
975 "amdgpu: failed to initialize sw for display support.\n");
979 /* Update the actual used number of crtc */
980 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
982 /* TODO: Add_display_info? */
984 /* TODO use dynamic cursor width */
985 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
986 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
988 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
990 "amdgpu: failed to initialize sw for display support.\n");
994 #if defined(CONFIG_DEBUG_FS)
995 if (dtn_debugfs_init(adev
))
996 DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
999 DRM_DEBUG_DRIVER("KMS initialized.\n");
1003 amdgpu_dm_fini(adev
);
1008 static void amdgpu_dm_fini(struct amdgpu_device
*adev
)
1010 amdgpu_dm_audio_fini(adev
);
1012 amdgpu_dm_destroy_drm_device(&adev
->dm
);
1014 #ifdef CONFIG_DRM_AMD_DC_HDCP
1015 if (adev
->dm
.hdcp_workqueue
) {
1016 hdcp_destroy(adev
->dm
.hdcp_workqueue
);
1017 adev
->dm
.hdcp_workqueue
= NULL
;
1021 dc_deinit_callbacks(adev
->dm
.dc
);
1023 if (adev
->dm
.dc
->ctx
->dmub_srv
) {
1024 dc_dmub_srv_destroy(&adev
->dm
.dc
->ctx
->dmub_srv
);
1025 adev
->dm
.dc
->ctx
->dmub_srv
= NULL
;
1028 if (adev
->dm
.dmub_bo
)
1029 amdgpu_bo_free_kernel(&adev
->dm
.dmub_bo
,
1030 &adev
->dm
.dmub_bo_gpu_addr
,
1031 &adev
->dm
.dmub_bo_cpu_addr
);
1033 /* DC Destroy TODO: Replace destroy DAL */
1035 dc_destroy(&adev
->dm
.dc
);
1037 * TODO: pageflip, vlank interrupt
1039 * amdgpu_dm_irq_fini(adev);
1042 if (adev
->dm
.cgs_device
) {
1043 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
1044 adev
->dm
.cgs_device
= NULL
;
1046 if (adev
->dm
.freesync_module
) {
1047 mod_freesync_destroy(adev
->dm
.freesync_module
);
1048 adev
->dm
.freesync_module
= NULL
;
1051 mutex_destroy(&adev
->dm
.audio_lock
);
1052 mutex_destroy(&adev
->dm
.dc_lock
);
1057 static int load_dmcu_fw(struct amdgpu_device
*adev
)
1059 const char *fw_name_dmcu
= NULL
;
1061 const struct dmcu_firmware_header_v1_0
*hdr
;
1063 switch(adev
->asic_type
) {
1073 case CHIP_POLARIS11
:
1074 case CHIP_POLARIS10
:
1075 case CHIP_POLARIS12
:
1086 if (ASICREV_IS_PICASSO(adev
->external_rev_id
))
1087 fw_name_dmcu
= FIRMWARE_RAVEN_DMCU
;
1088 else if (ASICREV_IS_RAVEN2(adev
->external_rev_id
))
1089 fw_name_dmcu
= FIRMWARE_RAVEN_DMCU
;
1094 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
1098 if (adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_PSP
) {
1099 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1103 r
= request_firmware_direct(&adev
->dm
.fw_dmcu
, fw_name_dmcu
, adev
->dev
);
1105 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1106 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1107 adev
->dm
.fw_dmcu
= NULL
;
1111 dev_err(adev
->dev
, "amdgpu_dm: Can't load firmware \"%s\"\n",
1116 r
= amdgpu_ucode_validate(adev
->dm
.fw_dmcu
);
1118 dev_err(adev
->dev
, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1120 release_firmware(adev
->dm
.fw_dmcu
);
1121 adev
->dm
.fw_dmcu
= NULL
;
1125 hdr
= (const struct dmcu_firmware_header_v1_0
*)adev
->dm
.fw_dmcu
->data
;
1126 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_ERAM
].ucode_id
= AMDGPU_UCODE_ID_DMCU_ERAM
;
1127 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_ERAM
].fw
= adev
->dm
.fw_dmcu
;
1128 adev
->firmware
.fw_size
+=
1129 ALIGN(le32_to_cpu(hdr
->header
.ucode_size_bytes
) - le32_to_cpu(hdr
->intv_size_bytes
), PAGE_SIZE
);
1131 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_INTV
].ucode_id
= AMDGPU_UCODE_ID_DMCU_INTV
;
1132 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_INTV
].fw
= adev
->dm
.fw_dmcu
;
1133 adev
->firmware
.fw_size
+=
1134 ALIGN(le32_to_cpu(hdr
->intv_size_bytes
), PAGE_SIZE
);
1136 adev
->dm
.dmcu_fw_version
= le32_to_cpu(hdr
->header
.ucode_version
);
1138 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1143 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx
, uint32_t address
)
1145 struct amdgpu_device
*adev
= ctx
;
1147 return dm_read_reg(adev
->dm
.dc
->ctx
, address
);
1150 static void amdgpu_dm_dmub_reg_write(void *ctx
, uint32_t address
,
1153 struct amdgpu_device
*adev
= ctx
;
1155 return dm_write_reg(adev
->dm
.dc
->ctx
, address
, value
);
1158 static int dm_dmub_sw_init(struct amdgpu_device
*adev
)
1160 struct dmub_srv_create_params create_params
;
1161 struct dmub_srv_region_params region_params
;
1162 struct dmub_srv_region_info region_info
;
1163 struct dmub_srv_fb_params fb_params
;
1164 struct dmub_srv_fb_info
*fb_info
;
1165 struct dmub_srv
*dmub_srv
;
1166 const struct dmcub_firmware_header_v1_0
*hdr
;
1167 const char *fw_name_dmub
;
1168 enum dmub_asic dmub_asic
;
1169 enum dmub_status status
;
1172 switch (adev
->asic_type
) {
1174 dmub_asic
= DMUB_ASIC_DCN21
;
1175 fw_name_dmub
= FIRMWARE_RENOIR_DMUB
;
1179 /* ASIC doesn't support DMUB. */
1183 r
= request_firmware_direct(&adev
->dm
.dmub_fw
, fw_name_dmub
, adev
->dev
);
1185 DRM_ERROR("DMUB firmware loading failed: %d\n", r
);
1189 r
= amdgpu_ucode_validate(adev
->dm
.dmub_fw
);
1191 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r
);
1195 if (adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_PSP
) {
1196 DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
1200 hdr
= (const struct dmcub_firmware_header_v1_0
*)adev
->dm
.dmub_fw
->data
;
1201 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCUB
].ucode_id
=
1202 AMDGPU_UCODE_ID_DMCUB
;
1203 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCUB
].fw
= adev
->dm
.dmub_fw
;
1204 adev
->firmware
.fw_size
+=
1205 ALIGN(le32_to_cpu(hdr
->inst_const_bytes
), PAGE_SIZE
);
1207 adev
->dm
.dmcub_fw_version
= le32_to_cpu(hdr
->header
.ucode_version
);
1209 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1210 adev
->dm
.dmcub_fw_version
);
1212 adev
->dm
.dmub_srv
= kzalloc(sizeof(*adev
->dm
.dmub_srv
), GFP_KERNEL
);
1213 dmub_srv
= adev
->dm
.dmub_srv
;
1216 DRM_ERROR("Failed to allocate DMUB service!\n");
1220 memset(&create_params
, 0, sizeof(create_params
));
1221 create_params
.user_ctx
= adev
;
1222 create_params
.funcs
.reg_read
= amdgpu_dm_dmub_reg_read
;
1223 create_params
.funcs
.reg_write
= amdgpu_dm_dmub_reg_write
;
1224 create_params
.asic
= dmub_asic
;
1226 /* Create the DMUB service. */
1227 status
= dmub_srv_create(dmub_srv
, &create_params
);
1228 if (status
!= DMUB_STATUS_OK
) {
1229 DRM_ERROR("Error creating DMUB service: %d\n", status
);
1233 /* Calculate the size of all the regions for the DMUB service. */
1234 memset(®ion_params
, 0, sizeof(region_params
));
1236 region_params
.inst_const_size
= le32_to_cpu(hdr
->inst_const_bytes
) -
1237 PSP_HEADER_BYTES
- PSP_FOOTER_BYTES
;
1238 region_params
.bss_data_size
= le32_to_cpu(hdr
->bss_data_bytes
);
1239 region_params
.vbios_size
= adev
->bios_size
;
1240 region_params
.fw_bss_data
=
1241 adev
->dm
.dmub_fw
->data
+
1242 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
) +
1243 le32_to_cpu(hdr
->inst_const_bytes
);
1245 status
= dmub_srv_calc_region_info(dmub_srv
, ®ion_params
,
1248 if (status
!= DMUB_STATUS_OK
) {
1249 DRM_ERROR("Error calculating DMUB region info: %d\n", status
);
1254 * Allocate a framebuffer based on the total size of all the regions.
1255 * TODO: Move this into GART.
1257 r
= amdgpu_bo_create_kernel(adev
, region_info
.fb_size
, PAGE_SIZE
,
1258 AMDGPU_GEM_DOMAIN_VRAM
, &adev
->dm
.dmub_bo
,
1259 &adev
->dm
.dmub_bo_gpu_addr
,
1260 &adev
->dm
.dmub_bo_cpu_addr
);
1264 /* Rebase the regions on the framebuffer address. */
1265 memset(&fb_params
, 0, sizeof(fb_params
));
1266 fb_params
.cpu_addr
= adev
->dm
.dmub_bo_cpu_addr
;
1267 fb_params
.gpu_addr
= adev
->dm
.dmub_bo_gpu_addr
;
1268 fb_params
.region_info
= ®ion_info
;
1270 adev
->dm
.dmub_fb_info
=
1271 kzalloc(sizeof(*adev
->dm
.dmub_fb_info
), GFP_KERNEL
);
1272 fb_info
= adev
->dm
.dmub_fb_info
;
1276 "Failed to allocate framebuffer info for DMUB service!\n");
1280 status
= dmub_srv_calc_fb_info(dmub_srv
, &fb_params
, fb_info
);
1281 if (status
!= DMUB_STATUS_OK
) {
1282 DRM_ERROR("Error calculating DMUB FB info: %d\n", status
);
/* IP-block sw_init hook: set up DMUB, then load DMCU firmware. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}
1301 static int dm_sw_fini(void *handle
)
1303 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1305 kfree(adev
->dm
.dmub_fb_info
);
1306 adev
->dm
.dmub_fb_info
= NULL
;
1308 if (adev
->dm
.dmub_srv
) {
1309 dmub_srv_destroy(adev
->dm
.dmub_srv
);
1310 adev
->dm
.dmub_srv
= NULL
;
1313 if (adev
->dm
.dmub_fw
) {
1314 release_firmware(adev
->dm
.dmub_fw
);
1315 adev
->dm
.dmub_fw
= NULL
;
1318 if(adev
->dm
.fw_dmcu
) {
1319 release_firmware(adev
->dm
.fw_dmcu
);
1320 adev
->dm
.fw_dmcu
= NULL
;
1326 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
1328 struct amdgpu_dm_connector
*aconnector
;
1329 struct drm_connector
*connector
;
1330 struct drm_connector_list_iter iter
;
1333 drm_connector_list_iter_begin(dev
, &iter
);
1334 drm_for_each_connector_iter(connector
, &iter
) {
1335 aconnector
= to_amdgpu_dm_connector(connector
);
1336 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
1337 aconnector
->mst_mgr
.aux
) {
1338 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1340 aconnector
->base
.base
.id
);
1342 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
1344 DRM_ERROR("DM_MST: Failed to start MST\n");
1345 aconnector
->dc_link
->type
=
1346 dc_connection_single
;
1351 drm_connector_list_iter_end(&iter
);
1356 static int dm_late_init(void *handle
)
1358 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1360 struct dmcu_iram_parameters params
;
1361 unsigned int linear_lut
[16];
1363 struct dmcu
*dmcu
= adev
->dm
.dc
->res_pool
->dmcu
;
1366 for (i
= 0; i
< 16; i
++)
1367 linear_lut
[i
] = 0xFFFF * i
/ 15;
1370 params
.backlight_ramping_start
= 0xCCCC;
1371 params
.backlight_ramping_reduction
= 0xCCCCCCCC;
1372 params
.backlight_lut_array_size
= 16;
1373 params
.backlight_lut_array
= linear_lut
;
1375 /* Min backlight level after ABM reduction, Don't allow below 1%
1376 * 0xFFFF x 0.01 = 0x28F
1378 params
.min_abm_backlight
= 0x28F;
1380 /* todo will enable for navi10 */
1381 if (adev
->asic_type
<= CHIP_RAVEN
) {
1382 ret
= dmcu_load_iram(dmcu
, params
);
1388 return detect_mst_link_for_all_connectors(adev
->ddev
);
1391 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
1393 struct amdgpu_dm_connector
*aconnector
;
1394 struct drm_connector
*connector
;
1395 struct drm_connector_list_iter iter
;
1396 struct drm_dp_mst_topology_mgr
*mgr
;
1398 bool need_hotplug
= false;
1400 drm_connector_list_iter_begin(dev
, &iter
);
1401 drm_for_each_connector_iter(connector
, &iter
) {
1402 aconnector
= to_amdgpu_dm_connector(connector
);
1403 if (aconnector
->dc_link
->type
!= dc_connection_mst_branch
||
1404 aconnector
->mst_port
)
1407 mgr
= &aconnector
->mst_mgr
;
1410 drm_dp_mst_topology_mgr_suspend(mgr
);
1412 ret
= drm_dp_mst_topology_mgr_resume(mgr
, true);
1414 drm_dp_mst_topology_mgr_set_mst(mgr
, false);
1415 need_hotplug
= true;
1419 drm_connector_list_iter_end(&iter
);
1422 drm_kms_helper_hotplug_event(dev
);
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
1474 static int dm_suspend(void *handle
)
1476 struct amdgpu_device
*adev
= handle
;
1477 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1480 WARN_ON(adev
->dm
.cached_state
);
1481 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
1483 s3_handle_mst(adev
->ddev
, true);
1485 amdgpu_dm_irq_suspend(adev
);
1488 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D3
);
1493 static struct amdgpu_dm_connector
*
1494 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state
*state
,
1495 struct drm_crtc
*crtc
)
1498 struct drm_connector_state
*new_con_state
;
1499 struct drm_connector
*connector
;
1500 struct drm_crtc
*crtc_from_state
;
1502 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
1503 crtc_from_state
= new_con_state
->crtc
;
1505 if (crtc_from_state
== crtc
)
1506 return to_amdgpu_dm_connector(connector
);
1512 static void emulated_link_detect(struct dc_link
*link
)
1514 struct dc_sink_init_data sink_init_data
= { 0 };
1515 struct display_sink_capability sink_caps
= { 0 };
1516 enum dc_edid_status edid_status
;
1517 struct dc_context
*dc_ctx
= link
->ctx
;
1518 struct dc_sink
*sink
= NULL
;
1519 struct dc_sink
*prev_sink
= NULL
;
1521 link
->type
= dc_connection_none
;
1522 prev_sink
= link
->local_sink
;
1524 if (prev_sink
!= NULL
)
1525 dc_sink_retain(prev_sink
);
1527 switch (link
->connector_signal
) {
1528 case SIGNAL_TYPE_HDMI_TYPE_A
: {
1529 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1530 sink_caps
.signal
= SIGNAL_TYPE_HDMI_TYPE_A
;
1534 case SIGNAL_TYPE_DVI_SINGLE_LINK
: {
1535 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1536 sink_caps
.signal
= SIGNAL_TYPE_DVI_SINGLE_LINK
;
1540 case SIGNAL_TYPE_DVI_DUAL_LINK
: {
1541 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1542 sink_caps
.signal
= SIGNAL_TYPE_DVI_DUAL_LINK
;
1546 case SIGNAL_TYPE_LVDS
: {
1547 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1548 sink_caps
.signal
= SIGNAL_TYPE_LVDS
;
1552 case SIGNAL_TYPE_EDP
: {
1553 sink_caps
.transaction_type
=
1554 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
1555 sink_caps
.signal
= SIGNAL_TYPE_EDP
;
1559 case SIGNAL_TYPE_DISPLAY_PORT
: {
1560 sink_caps
.transaction_type
=
1561 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
1562 sink_caps
.signal
= SIGNAL_TYPE_VIRTUAL
;
1567 DC_ERROR("Invalid connector type! signal:%d\n",
1568 link
->connector_signal
);
1572 sink_init_data
.link
= link
;
1573 sink_init_data
.sink_signal
= sink_caps
.signal
;
1575 sink
= dc_sink_create(&sink_init_data
);
1577 DC_ERROR("Failed to create sink!\n");
1581 /* dc_sink_create returns a new reference */
1582 link
->local_sink
= sink
;
1584 edid_status
= dm_helpers_read_local_edid(
1589 if (edid_status
!= EDID_OK
)
1590 DC_ERROR("Failed to read EDID");
1594 static int dm_resume(void *handle
)
1596 struct amdgpu_device
*adev
= handle
;
1597 struct drm_device
*ddev
= adev
->ddev
;
1598 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1599 struct amdgpu_dm_connector
*aconnector
;
1600 struct drm_connector
*connector
;
1601 struct drm_connector_list_iter iter
;
1602 struct drm_crtc
*crtc
;
1603 struct drm_crtc_state
*new_crtc_state
;
1604 struct dm_crtc_state
*dm_new_crtc_state
;
1605 struct drm_plane
*plane
;
1606 struct drm_plane_state
*new_plane_state
;
1607 struct dm_plane_state
*dm_new_plane_state
;
1608 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(dm
->atomic_obj
.state
);
1609 enum dc_connection_type new_connection_type
= dc_connection_none
;
1612 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1613 dc_release_state(dm_state
->context
);
1614 dm_state
->context
= dc_create_state(dm
->dc
);
1615 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1616 dc_resource_state_construct(dm
->dc
, dm_state
->context
);
1618 /* Before powering on DC we need to re-initialize DMUB. */
1619 r
= dm_dmub_hw_init(adev
);
1621 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r
);
1623 /* power on hardware */
1624 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
1626 /* program HPD filter */
1630 * early enable HPD Rx IRQ, should be done before set mode as short
1631 * pulse interrupts are used for MST
1633 amdgpu_dm_irq_resume_early(adev
);
1635 /* On resume we need to rewrite the MSTM control bits to enable MST*/
1636 s3_handle_mst(ddev
, false);
1639 drm_connector_list_iter_begin(ddev
, &iter
);
1640 drm_for_each_connector_iter(connector
, &iter
) {
1641 aconnector
= to_amdgpu_dm_connector(connector
);
1644 * this is the case when traversing through already created
1645 * MST connectors, should be skipped
1647 if (aconnector
->mst_port
)
1650 mutex_lock(&aconnector
->hpd_lock
);
1651 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
1652 DRM_ERROR("KMS: Failed to detect connector\n");
1654 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
)
1655 emulated_link_detect(aconnector
->dc_link
);
1657 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
1659 if (aconnector
->fake_enable
&& aconnector
->dc_link
->local_sink
)
1660 aconnector
->fake_enable
= false;
1662 if (aconnector
->dc_sink
)
1663 dc_sink_release(aconnector
->dc_sink
);
1664 aconnector
->dc_sink
= NULL
;
1665 amdgpu_dm_update_connector_after_detect(aconnector
);
1666 mutex_unlock(&aconnector
->hpd_lock
);
1668 drm_connector_list_iter_end(&iter
);
1670 /* Force mode set in atomic commit */
1671 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
)
1672 new_crtc_state
->active_changed
= true;
1675 * atomic_check is expected to create the dc states. We need to release
1676 * them here, since they were duplicated as part of the suspend
1679 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
) {
1680 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
1681 if (dm_new_crtc_state
->stream
) {
1682 WARN_ON(kref_read(&dm_new_crtc_state
->stream
->refcount
) > 1);
1683 dc_stream_release(dm_new_crtc_state
->stream
);
1684 dm_new_crtc_state
->stream
= NULL
;
1688 for_each_new_plane_in_state(dm
->cached_state
, plane
, new_plane_state
, i
) {
1689 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
1690 if (dm_new_plane_state
->dc_state
) {
1691 WARN_ON(kref_read(&dm_new_plane_state
->dc_state
->refcount
) > 1);
1692 dc_plane_state_release(dm_new_plane_state
->dc_state
);
1693 dm_new_plane_state
->dc_state
= NULL
;
1697 drm_atomic_helper_resume(ddev
, dm
->cached_state
);
1699 dm
->cached_state
= NULL
;
1701 amdgpu_dm_irq_resume_late(adev
);
1709 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1710 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1711 * the base driver's device list to be initialized and torn down accordingly.
1713 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1716 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
1718 .early_init
= dm_early_init
,
1719 .late_init
= dm_late_init
,
1720 .sw_init
= dm_sw_init
,
1721 .sw_fini
= dm_sw_fini
,
1722 .hw_init
= dm_hw_init
,
1723 .hw_fini
= dm_hw_fini
,
1724 .suspend
= dm_suspend
,
1725 .resume
= dm_resume
,
1726 .is_idle
= dm_is_idle
,
1727 .wait_for_idle
= dm_wait_for_idle
,
1728 .check_soft_reset
= dm_check_soft_reset
,
1729 .soft_reset
= dm_soft_reset
,
1730 .set_clockgating_state
= dm_set_clockgating_state
,
1731 .set_powergating_state
= dm_set_powergating_state
,
1734 const struct amdgpu_ip_block_version dm_ip_block
=
1736 .type
= AMD_IP_BLOCK_TYPE_DCE
,
1740 .funcs
= &amdgpu_dm_funcs
,
1750 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
1751 .fb_create
= amdgpu_display_user_framebuffer_create
,
1752 .output_poll_changed
= drm_fb_helper_output_poll_changed
,
1753 .atomic_check
= amdgpu_dm_atomic_check
,
1754 .atomic_commit
= amdgpu_dm_atomic_commit
,
1757 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
1758 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
1762 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
)
1764 struct drm_connector
*connector
= &aconnector
->base
;
1765 struct drm_device
*dev
= connector
->dev
;
1766 struct dc_sink
*sink
;
1768 /* MST handled by drm_mst framework */
1769 if (aconnector
->mst_mgr
.mst_state
== true)
1773 sink
= aconnector
->dc_link
->local_sink
;
1775 dc_sink_retain(sink
);
1778 * Edid mgmt connector gets first update only in mode_valid hook and then
1779 * the connector sink is set to either fake or physical sink depends on link status.
1780 * Skip if already done during boot.
1782 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
1783 && aconnector
->dc_em_sink
) {
1786 * For S3 resume with headless use eml_sink to fake stream
1787 * because on resume connector->sink is set to NULL
1789 mutex_lock(&dev
->mode_config
.mutex
);
1792 if (aconnector
->dc_sink
) {
1793 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1795 * retain and release below are used to
1796 * bump up refcount for sink because the link doesn't point
1797 * to it anymore after disconnect, so on next crtc to connector
1798 * reshuffle by UMD we will get into unwanted dc_sink release
1800 dc_sink_release(aconnector
->dc_sink
);
1802 aconnector
->dc_sink
= sink
;
1803 dc_sink_retain(aconnector
->dc_sink
);
1804 amdgpu_dm_update_freesync_caps(connector
,
1807 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1808 if (!aconnector
->dc_sink
) {
1809 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
1810 dc_sink_retain(aconnector
->dc_sink
);
1814 mutex_unlock(&dev
->mode_config
.mutex
);
1817 dc_sink_release(sink
);
1822 * TODO: temporary guard to look for proper fix
1823 * if this sink is MST sink, we should not do anything
1825 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
) {
1826 dc_sink_release(sink
);
1830 if (aconnector
->dc_sink
== sink
) {
1832 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1835 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1836 aconnector
->connector_id
);
1838 dc_sink_release(sink
);
1842 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1843 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
1845 mutex_lock(&dev
->mode_config
.mutex
);
1848 * 1. Update status of the drm connector
1849 * 2. Send an event and let userspace tell us what to do
1853 * TODO: check if we still need the S3 mode update workaround.
1854 * If yes, put it here.
1856 if (aconnector
->dc_sink
)
1857 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1859 aconnector
->dc_sink
= sink
;
1860 dc_sink_retain(aconnector
->dc_sink
);
1861 if (sink
->dc_edid
.length
== 0) {
1862 aconnector
->edid
= NULL
;
1863 drm_dp_cec_unset_edid(&aconnector
->dm_dp_aux
.aux
);
1866 (struct edid
*) sink
->dc_edid
.raw_edid
;
1869 drm_connector_update_edid_property(connector
,
1871 drm_dp_cec_set_edid(&aconnector
->dm_dp_aux
.aux
,
1874 amdgpu_dm_update_freesync_caps(connector
, aconnector
->edid
);
1877 drm_dp_cec_unset_edid(&aconnector
->dm_dp_aux
.aux
);
1878 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1879 drm_connector_update_edid_property(connector
, NULL
);
1880 aconnector
->num_modes
= 0;
1881 dc_sink_release(aconnector
->dc_sink
);
1882 aconnector
->dc_sink
= NULL
;
1883 aconnector
->edid
= NULL
;
1884 #ifdef CONFIG_DRM_AMD_DC_HDCP
1885 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
1886 if (connector
->state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_ENABLED
)
1887 connector
->state
->content_protection
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
1891 mutex_unlock(&dev
->mode_config
.mutex
);
1894 dc_sink_release(sink
);
1897 static void handle_hpd_irq(void *param
)
1899 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
1900 struct drm_connector
*connector
= &aconnector
->base
;
1901 struct drm_device
*dev
= connector
->dev
;
1902 enum dc_connection_type new_connection_type
= dc_connection_none
;
1903 #ifdef CONFIG_DRM_AMD_DC_HDCP
1904 struct amdgpu_device
*adev
= dev
->dev_private
;
1908 * In case of failure or MST no need to update connector status or notify the OS
1909 * since (for MST case) MST does this in its own context.
1911 mutex_lock(&aconnector
->hpd_lock
);
1913 #ifdef CONFIG_DRM_AMD_DC_HDCP
1914 if (adev
->asic_type
>= CHIP_RAVEN
)
1915 hdcp_reset_display(adev
->dm
.hdcp_workqueue
, aconnector
->dc_link
->link_index
);
1917 if (aconnector
->fake_enable
)
1918 aconnector
->fake_enable
= false;
1920 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
1921 DRM_ERROR("KMS: Failed to detect connector\n");
1923 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
1924 emulated_link_detect(aconnector
->dc_link
);
1927 drm_modeset_lock_all(dev
);
1928 dm_restore_drm_connector_state(dev
, connector
);
1929 drm_modeset_unlock_all(dev
);
1931 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
1932 drm_kms_helper_hotplug_event(dev
);
1934 } else if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
1935 amdgpu_dm_update_connector_after_detect(aconnector
);
1938 drm_modeset_lock_all(dev
);
1939 dm_restore_drm_connector_state(dev
, connector
);
1940 drm_modeset_unlock_all(dev
);
1942 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
1943 drm_kms_helper_hotplug_event(dev
);
1945 mutex_unlock(&aconnector
->hpd_lock
);
1949 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
1951 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
1953 bool new_irq_handled
= false;
1955 int dpcd_bytes_to_read
;
1957 const int max_process_count
= 30;
1958 int process_count
= 0;
1960 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
1962 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
1963 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
1964 /* DPCD 0x200 - 0x201 for downstream IRQ */
1965 dpcd_addr
= DP_SINK_COUNT
;
1967 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
1968 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
1969 dpcd_addr
= DP_SINK_COUNT_ESI
;
1972 dret
= drm_dp_dpcd_read(
1973 &aconnector
->dm_dp_aux
.aux
,
1976 dpcd_bytes_to_read
);
1978 while (dret
== dpcd_bytes_to_read
&&
1979 process_count
< max_process_count
) {
1985 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
1986 /* handle HPD short pulse irq */
1987 if (aconnector
->mst_mgr
.mst_state
)
1989 &aconnector
->mst_mgr
,
1993 if (new_irq_handled
) {
1994 /* ACK at DPCD to notify down stream */
1995 const int ack_dpcd_bytes_to_write
=
1996 dpcd_bytes_to_read
- 1;
1998 for (retry
= 0; retry
< 3; retry
++) {
2001 wret
= drm_dp_dpcd_write(
2002 &aconnector
->dm_dp_aux
.aux
,
2005 ack_dpcd_bytes_to_write
);
2006 if (wret
== ack_dpcd_bytes_to_write
)
2010 /* check if there is new irq to be handled */
2011 dret
= drm_dp_dpcd_read(
2012 &aconnector
->dm_dp_aux
.aux
,
2015 dpcd_bytes_to_read
);
2017 new_irq_handled
= false;
2023 if (process_count
== max_process_count
)
2024 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2027 static void handle_hpd_rx_irq(void *param
)
2029 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
2030 struct drm_connector
*connector
= &aconnector
->base
;
2031 struct drm_device
*dev
= connector
->dev
;
2032 struct dc_link
*dc_link
= aconnector
->dc_link
;
2033 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
2034 enum dc_connection_type new_connection_type
= dc_connection_none
;
2035 #ifdef CONFIG_DRM_AMD_DC_HDCP
2036 union hpd_irq_data hpd_irq_data
;
2037 struct amdgpu_device
*adev
= dev
->dev_private
;
2039 memset(&hpd_irq_data
, 0, sizeof(hpd_irq_data
));
2043 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2044 * conflict, after implement i2c helper, this mutex should be
2047 if (dc_link
->type
!= dc_connection_mst_branch
)
2048 mutex_lock(&aconnector
->hpd_lock
);
2051 #ifdef CONFIG_DRM_AMD_DC_HDCP
2052 if (dc_link_handle_hpd_rx_irq(dc_link
, &hpd_irq_data
, NULL
) &&
2054 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
, NULL
) &&
2056 !is_mst_root_connector
) {
2057 /* Downstream Port status changed. */
2058 if (!dc_link_detect_sink(dc_link
, &new_connection_type
))
2059 DRM_ERROR("KMS: Failed to detect connector\n");
2061 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
2062 emulated_link_detect(dc_link
);
2064 if (aconnector
->fake_enable
)
2065 aconnector
->fake_enable
= false;
2067 amdgpu_dm_update_connector_after_detect(aconnector
);
2070 drm_modeset_lock_all(dev
);
2071 dm_restore_drm_connector_state(dev
, connector
);
2072 drm_modeset_unlock_all(dev
);
2074 drm_kms_helper_hotplug_event(dev
);
2075 } else if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
2077 if (aconnector
->fake_enable
)
2078 aconnector
->fake_enable
= false;
2080 amdgpu_dm_update_connector_after_detect(aconnector
);
2083 drm_modeset_lock_all(dev
);
2084 dm_restore_drm_connector_state(dev
, connector
);
2085 drm_modeset_unlock_all(dev
);
2087 drm_kms_helper_hotplug_event(dev
);
2090 #ifdef CONFIG_DRM_AMD_DC_HDCP
2091 if (hpd_irq_data
.bytes
.device_service_irq
.bits
.CP_IRQ
)
2092 hdcp_handle_cpirq(adev
->dm
.hdcp_workqueue
, aconnector
->base
.index
);
2094 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
2095 (dc_link
->type
== dc_connection_mst_branch
))
2096 dm_handle_hpd_rx_irq(aconnector
);
2098 if (dc_link
->type
!= dc_connection_mst_branch
) {
2099 drm_dp_cec_irq(&aconnector
->dm_dp_aux
.aux
);
2100 mutex_unlock(&aconnector
->hpd_lock
);
2104 static void register_hpd_handlers(struct amdgpu_device
*adev
)
2106 struct drm_device
*dev
= adev
->ddev
;
2107 struct drm_connector
*connector
;
2108 struct amdgpu_dm_connector
*aconnector
;
2109 const struct dc_link
*dc_link
;
2110 struct dc_interrupt_params int_params
= {0};
2112 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2113 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2115 list_for_each_entry(connector
,
2116 &dev
->mode_config
.connector_list
, head
) {
2118 aconnector
= to_amdgpu_dm_connector(connector
);
2119 dc_link
= aconnector
->dc_link
;
2121 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
2122 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
2123 int_params
.irq_source
= dc_link
->irq_source_hpd
;
2125 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2127 (void *) aconnector
);
2130 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
2132 /* Also register for DP short pulse (hpd_rx). */
2133 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
2134 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
2136 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2138 (void *) aconnector
);
2143 /* Register IRQ sources and initialize IRQ callbacks */
2144 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
2146 struct dc
*dc
= adev
->dm
.dc
;
2147 struct common_irq_params
*c_irq_params
;
2148 struct dc_interrupt_params int_params
= {0};
2151 unsigned client_id
= AMDGPU_IRQ_CLIENTID_LEGACY
;
2153 if (adev
->asic_type
>= CHIP_VEGA10
)
2154 client_id
= SOC15_IH_CLIENTID_DCE
;
2156 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2157 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2160 * Actions of amdgpu_irq_add_id():
2161 * 1. Register a set() function with base driver.
2162 * Base driver will call set() function to enable/disable an
2163 * interrupt in DC hardware.
2164 * 2. Register amdgpu_dm_irq_handler().
2165 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2166 * coming from DC hardware.
2167 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2168 * for acknowledging and handling. */
2170 /* Use VBLANK interrupt */
2171 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
2172 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
2174 DRM_ERROR("Failed to add crtc irq id!\n");
2178 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2179 int_params
.irq_source
=
2180 dc_interrupt_to_irq_source(dc
, i
, 0);
2182 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
2184 c_irq_params
->adev
= adev
;
2185 c_irq_params
->irq_src
= int_params
.irq_source
;
2187 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2188 dm_crtc_high_irq
, c_irq_params
);
2191 /* Use VUPDATE interrupt */
2192 for (i
= VISLANDS30_IV_SRCID_D1_V_UPDATE_INT
; i
<= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT
; i
+= 2) {
2193 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->vupdate_irq
);
2195 DRM_ERROR("Failed to add vupdate irq id!\n");
2199 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2200 int_params
.irq_source
=
2201 dc_interrupt_to_irq_source(dc
, i
, 0);
2203 c_irq_params
= &adev
->dm
.vupdate_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VUPDATE1
];
2205 c_irq_params
->adev
= adev
;
2206 c_irq_params
->irq_src
= int_params
.irq_source
;
2208 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2209 dm_vupdate_high_irq
, c_irq_params
);
2212 /* Use GRPH_PFLIP interrupt */
2213 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
2214 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
2215 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
2217 DRM_ERROR("Failed to add page flip irq id!\n");
2221 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2222 int_params
.irq_source
=
2223 dc_interrupt_to_irq_source(dc
, i
, 0);
2225 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
2227 c_irq_params
->adev
= adev
;
2228 c_irq_params
->irq_src
= int_params
.irq_source
;
2230 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2231 dm_pflip_high_irq
, c_irq_params
);
2236 r
= amdgpu_irq_add_id(adev
, client_id
,
2237 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
2239 DRM_ERROR("Failed to add hpd irq id!\n");
2243 register_hpd_handlers(adev
);
2248 #if defined(CONFIG_DRM_AMD_DC_DCN)
2249 /* Register IRQ sources and initialize IRQ callbacks */
2250 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
2252 struct dc
*dc
= adev
->dm
.dc
;
2253 struct common_irq_params
*c_irq_params
;
2254 struct dc_interrupt_params int_params
= {0};
2258 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2259 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2262 * Actions of amdgpu_irq_add_id():
2263 * 1. Register a set() function with base driver.
2264 * Base driver will call set() function to enable/disable an
2265 * interrupt in DC hardware.
2266 * 2. Register amdgpu_dm_irq_handler().
2267 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2268 * coming from DC hardware.
2269 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2270 * for acknowledging and handling.
2273 /* Use VSTARTUP interrupt */
2274 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
2275 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
2277 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
2280 DRM_ERROR("Failed to add crtc irq id!\n");
2284 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2285 int_params
.irq_source
=
2286 dc_interrupt_to_irq_source(dc
, i
, 0);
2288 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
2290 c_irq_params
->adev
= adev
;
2291 c_irq_params
->irq_src
= int_params
.irq_source
;
2293 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2294 dm_dcn_crtc_high_irq
, c_irq_params
);
2297 /* Use GRPH_PFLIP interrupt */
2298 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
2299 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
2301 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
2303 DRM_ERROR("Failed to add page flip irq id!\n");
2307 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2308 int_params
.irq_source
=
2309 dc_interrupt_to_irq_source(dc
, i
, 0);
2311 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
2313 c_irq_params
->adev
= adev
;
2314 c_irq_params
->irq_src
= int_params
.irq_source
;
2316 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2317 dm_pflip_high_irq
, c_irq_params
);
2322 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
2325 DRM_ERROR("Failed to add hpd irq id!\n");
2329 register_hpd_handlers(adev
);
2336 * Acquires the lock for the atomic state object and returns
2337 * the new atomic state.
2339 * This should only be called during atomic check.
2341 static int dm_atomic_get_state(struct drm_atomic_state
*state
,
2342 struct dm_atomic_state
**dm_state
)
2344 struct drm_device
*dev
= state
->dev
;
2345 struct amdgpu_device
*adev
= dev
->dev_private
;
2346 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2347 struct drm_private_state
*priv_state
;
2352 priv_state
= drm_atomic_get_private_obj_state(state
, &dm
->atomic_obj
);
2353 if (IS_ERR(priv_state
))
2354 return PTR_ERR(priv_state
);
2356 *dm_state
= to_dm_atomic_state(priv_state
);
2361 struct dm_atomic_state
*
2362 dm_atomic_get_new_state(struct drm_atomic_state
*state
)
2364 struct drm_device
*dev
= state
->dev
;
2365 struct amdgpu_device
*adev
= dev
->dev_private
;
2366 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2367 struct drm_private_obj
*obj
;
2368 struct drm_private_state
*new_obj_state
;
2371 for_each_new_private_obj_in_state(state
, obj
, new_obj_state
, i
) {
2372 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
2373 return to_dm_atomic_state(new_obj_state
);
2379 struct dm_atomic_state
*
2380 dm_atomic_get_old_state(struct drm_atomic_state
*state
)
2382 struct drm_device
*dev
= state
->dev
;
2383 struct amdgpu_device
*adev
= dev
->dev_private
;
2384 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2385 struct drm_private_obj
*obj
;
2386 struct drm_private_state
*old_obj_state
;
2389 for_each_old_private_obj_in_state(state
, obj
, old_obj_state
, i
) {
2390 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
2391 return to_dm_atomic_state(old_obj_state
);
2397 static struct drm_private_state
*
2398 dm_atomic_duplicate_state(struct drm_private_obj
*obj
)
2400 struct dm_atomic_state
*old_state
, *new_state
;
2402 new_state
= kzalloc(sizeof(*new_state
), GFP_KERNEL
);
2406 __drm_atomic_helper_private_obj_duplicate_state(obj
, &new_state
->base
);
2408 old_state
= to_dm_atomic_state(obj
->state
);
2410 if (old_state
&& old_state
->context
)
2411 new_state
->context
= dc_copy_state(old_state
->context
);
2413 if (!new_state
->context
) {
2418 return &new_state
->base
;
2421 static void dm_atomic_destroy_state(struct drm_private_obj
*obj
,
2422 struct drm_private_state
*state
)
2424 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
2426 if (dm_state
&& dm_state
->context
)
2427 dc_release_state(dm_state
->context
);
2432 static struct drm_private_state_funcs dm_atomic_state_funcs
= {
2433 .atomic_duplicate_state
= dm_atomic_duplicate_state
,
2434 .atomic_destroy_state
= dm_atomic_destroy_state
,
2437 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
2439 struct dm_atomic_state
*state
;
2442 adev
->mode_info
.mode_config_initialized
= true;
2444 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
2445 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
2447 adev
->ddev
->mode_config
.max_width
= 16384;
2448 adev
->ddev
->mode_config
.max_height
= 16384;
2450 adev
->ddev
->mode_config
.preferred_depth
= 24;
2451 adev
->ddev
->mode_config
.prefer_shadow
= 1;
2452 /* indicates support for immediate flip */
2453 adev
->ddev
->mode_config
.async_page_flip
= true;
2455 adev
->ddev
->mode_config
.fb_base
= adev
->gmc
.aper_base
;
2457 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2461 state
->context
= dc_create_state(adev
->dm
.dc
);
2462 if (!state
->context
) {
2467 dc_resource_state_copy_construct_current(adev
->dm
.dc
, state
->context
);
2469 drm_atomic_private_obj_init(adev
->ddev
,
2470 &adev
->dm
.atomic_obj
,
2472 &dm_atomic_state_funcs
);
2474 r
= amdgpu_display_modeset_create_props(adev
);
2478 r
= amdgpu_dm_audio_init(adev
);
2485 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2486 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2488 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2489 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2491 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager
*dm
)
2493 #if defined(CONFIG_ACPI)
2494 struct amdgpu_dm_backlight_caps caps
;
2496 if (dm
->backlight_caps
.caps_valid
)
2499 amdgpu_acpi_get_backlight_caps(dm
->adev
, &caps
);
2500 if (caps
.caps_valid
) {
2501 dm
->backlight_caps
.min_input_signal
= caps
.min_input_signal
;
2502 dm
->backlight_caps
.max_input_signal
= caps
.max_input_signal
;
2503 dm
->backlight_caps
.caps_valid
= true;
2505 dm
->backlight_caps
.min_input_signal
=
2506 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
2507 dm
->backlight_caps
.max_input_signal
=
2508 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
2511 dm
->backlight_caps
.min_input_signal
= AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
2512 dm
->backlight_caps
.max_input_signal
= AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
2516 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
2518 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
2519 struct amdgpu_dm_backlight_caps caps
;
2520 uint32_t brightness
= bd
->props
.brightness
;
2522 amdgpu_dm_update_backlight_caps(dm
);
2523 caps
= dm
->backlight_caps
;
2525 * The brightness input is in the range 0-255
2526 * It needs to be rescaled to be between the
2527 * requested min and max input signal
2529 * It also needs to be scaled up by 0x101 to
2530 * match the DC interface which has a range of
2536 * (caps
.max_input_signal
- caps
.min_input_signal
)
2537 / AMDGPU_MAX_BL_LEVEL
2538 + caps
.min_input_signal
* 0x101;
2540 if (dc_link_set_backlight_level(dm
->backlight_link
,
2547 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
2549 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
2550 int ret
= dc_link_get_backlight_level(dm
->backlight_link
);
2552 if (ret
== DC_ERROR_UNEXPECTED
)
2553 return bd
->props
.brightness
;
2557 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
2558 .options
= BL_CORE_SUSPENDRESUME
,
2559 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
2560 .update_status
= amdgpu_dm_backlight_update_status
,
2564 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
2567 struct backlight_properties props
= { 0 };
2569 amdgpu_dm_update_backlight_caps(dm
);
2571 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
2572 props
.brightness
= AMDGPU_MAX_BL_LEVEL
;
2573 props
.type
= BACKLIGHT_RAW
;
2575 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
2576 dm
->adev
->ddev
->primary
->index
);
2578 dm
->backlight_dev
= backlight_device_register(bl_name
,
2579 dm
->adev
->ddev
->dev
,
2581 &amdgpu_dm_backlight_ops
,
2584 if (IS_ERR(dm
->backlight_dev
))
2585 DRM_ERROR("DM: Backlight registration failed!\n");
2587 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
2592 static int initialize_plane(struct amdgpu_display_manager
*dm
,
2593 struct amdgpu_mode_info
*mode_info
, int plane_id
,
2594 enum drm_plane_type plane_type
,
2595 const struct dc_plane_cap
*plane_cap
)
2597 struct drm_plane
*plane
;
2598 unsigned long possible_crtcs
;
2601 plane
= kzalloc(sizeof(struct drm_plane
), GFP_KERNEL
);
2603 DRM_ERROR("KMS: Failed to allocate plane\n");
2606 plane
->type
= plane_type
;
2609 * HACK: IGT tests expect that the primary plane for a CRTC
2610 * can only have one possible CRTC. Only expose support for
2611 * any CRTC if they're not going to be used as a primary plane
2612 * for a CRTC - like overlay or underlay planes.
2614 possible_crtcs
= 1 << plane_id
;
2615 if (plane_id
>= dm
->dc
->caps
.max_streams
)
2616 possible_crtcs
= 0xff;
2618 ret
= amdgpu_dm_plane_init(dm
, plane
, possible_crtcs
, plane_cap
);
2621 DRM_ERROR("KMS: Failed to initialize plane\n");
2627 mode_info
->planes
[plane_id
] = plane
;
/*
 * Register the backlight device for an eDP/LVDS link that has a connected
 * sink, and remember which link drives the backlight.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Event if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better then a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
2656 * In this architecture, the association
2657 * connector -> encoder -> crtc
2658 * id not really requried. The crtc and connector will hold the
2659 * display_index as an abstraction to use with DAL component
2661 * Returns 0 on success
2663 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
2665 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2667 struct amdgpu_dm_connector
*aconnector
= NULL
;
2668 struct amdgpu_encoder
*aencoder
= NULL
;
2669 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
2671 int32_t primary_planes
;
2672 enum dc_connection_type new_connection_type
= dc_connection_none
;
2673 const struct dc_plane_cap
*plane
;
2675 link_cnt
= dm
->dc
->caps
.max_links
;
2676 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
2677 DRM_ERROR("DM: Failed to initialize mode config\n");
2681 /* There is one primary plane per CRTC */
2682 primary_planes
= dm
->dc
->caps
.max_streams
;
2683 ASSERT(primary_planes
<= AMDGPU_MAX_PLANES
);
2686 * Initialize primary planes, implicit planes for legacy IOCTLS.
2687 * Order is reversed to match iteration order in atomic check.
2689 for (i
= (primary_planes
- 1); i
>= 0; i
--) {
2690 plane
= &dm
->dc
->caps
.planes
[i
];
2692 if (initialize_plane(dm
, mode_info
, i
,
2693 DRM_PLANE_TYPE_PRIMARY
, plane
)) {
2694 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2700 * Initialize overlay planes, index starting after primary planes.
2701 * These planes have a higher DRM index than the primary planes since
2702 * they should be considered as having a higher z-order.
2703 * Order is reversed to match iteration order in atomic check.
2705 * Only support DCN for now, and only expose one so we don't encourage
2706 * userspace to use up all the pipes.
2708 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; ++i
) {
2709 struct dc_plane_cap
*plane
= &dm
->dc
->caps
.planes
[i
];
2711 if (plane
->type
!= DC_PLANE_TYPE_DCN_UNIVERSAL
)
2714 if (!plane
->blends_with_above
|| !plane
->blends_with_below
)
2717 if (!plane
->pixel_format_support
.argb8888
)
2720 if (initialize_plane(dm
, NULL
, primary_planes
+ i
,
2721 DRM_PLANE_TYPE_OVERLAY
, plane
)) {
2722 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2726 /* Only create one overlay plane. */
2730 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
2731 if (amdgpu_dm_crtc_init(dm
, mode_info
->planes
[i
], i
)) {
2732 DRM_ERROR("KMS: Failed to initialize crtc\n");
2736 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
2738 /* loops over all connectors on the board */
2739 for (i
= 0; i
< link_cnt
; i
++) {
2740 struct dc_link
*link
= NULL
;
2742 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
2744 "KMS: Cannot support more than %d display indexes\n",
2745 AMDGPU_DM_MAX_DISPLAY_INDEX
);
2749 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
2753 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
2757 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
2758 DRM_ERROR("KMS: Failed to initialize encoder\n");
2762 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
2763 DRM_ERROR("KMS: Failed to initialize connector\n");
2767 link
= dc_get_link_at_index(dm
->dc
, i
);
2769 if (!dc_link_detect_sink(link
, &new_connection_type
))
2770 DRM_ERROR("KMS: Failed to detect connector\n");
2772 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
2773 emulated_link_detect(link
);
2774 amdgpu_dm_update_connector_after_detect(aconnector
);
2776 } else if (dc_link_detect(link
, DETECT_REASON_BOOT
)) {
2777 amdgpu_dm_update_connector_after_detect(aconnector
);
2778 register_backlight_device(dm
, link
);
2779 if (amdgpu_dc_feature_mask
& DC_PSR_MASK
)
2780 amdgpu_dm_set_psr_caps(link
);
2786 /* Software is initialized. Now we can register interrupt handlers. */
2787 switch (adev
->asic_type
) {
2797 case CHIP_POLARIS11
:
2798 case CHIP_POLARIS10
:
2799 case CHIP_POLARIS12
:
2804 if (dce110_register_irq_handlers(dm
->adev
)) {
2805 DRM_ERROR("DM: Failed to initialize IRQ\n");
2809 #if defined(CONFIG_DRM_AMD_DC_DCN)
2815 if (dcn10_register_irq_handlers(dm
->adev
)) {
2816 DRM_ERROR("DM: Failed to initialize IRQ\n");
2822 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
2826 if (adev
->asic_type
!= CHIP_CARRIZO
&& adev
->asic_type
!= CHIP_STONEY
)
2827 dm
->dc
->debug
.disable_stutter
= amdgpu_pp_feature_mask
& PP_STUTTER_MODE
? false : true;
2837 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
2839 drm_mode_config_cleanup(dm
->ddev
);
2840 drm_atomic_private_obj_fini(&dm
->atomic_obj
);
2844 /******************************************************************************
2845 * amdgpu_display_funcs functions
2846 *****************************************************************************/
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
2860 static const struct amdgpu_display_funcs dm_display_funcs
= {
2861 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
2862 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
2863 .backlight_set_level
= NULL
, /* never called for DC */
2864 .backlight_get_level
= NULL
, /* never called for DC */
2865 .hpd_sense
= NULL
,/* called unconditionally */
2866 .hpd_set_polarity
= NULL
, /* called unconditionally */
2867 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
2868 .page_flip_get_scanoutpos
=
2869 dm_crtc_get_scanoutpos
,/* called unconditionally */
2870 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
2871 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
#if defined(CONFIG_DEBUG_KERNEL_DC)

/*
 * Debug sysfs hook: writing a non-zero integer resumes the DM and fires a
 * hotplug event; writing zero suspends it. NOTE(review): the suspend/resume
 * branch bodies are reconstructed — confirm against the upstream source.
 */
static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
2903 static int dm_early_init(void *handle
)
2905 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
2907 switch (adev
->asic_type
) {
2910 adev
->mode_info
.num_crtc
= 6;
2911 adev
->mode_info
.num_hpd
= 6;
2912 adev
->mode_info
.num_dig
= 6;
2915 adev
->mode_info
.num_crtc
= 4;
2916 adev
->mode_info
.num_hpd
= 6;
2917 adev
->mode_info
.num_dig
= 7;
2921 adev
->mode_info
.num_crtc
= 2;
2922 adev
->mode_info
.num_hpd
= 6;
2923 adev
->mode_info
.num_dig
= 6;
2927 adev
->mode_info
.num_crtc
= 6;
2928 adev
->mode_info
.num_hpd
= 6;
2929 adev
->mode_info
.num_dig
= 7;
2932 adev
->mode_info
.num_crtc
= 3;
2933 adev
->mode_info
.num_hpd
= 6;
2934 adev
->mode_info
.num_dig
= 9;
2937 adev
->mode_info
.num_crtc
= 2;
2938 adev
->mode_info
.num_hpd
= 6;
2939 adev
->mode_info
.num_dig
= 9;
2941 case CHIP_POLARIS11
:
2942 case CHIP_POLARIS12
:
2943 adev
->mode_info
.num_crtc
= 5;
2944 adev
->mode_info
.num_hpd
= 5;
2945 adev
->mode_info
.num_dig
= 5;
2947 case CHIP_POLARIS10
:
2949 adev
->mode_info
.num_crtc
= 6;
2950 adev
->mode_info
.num_hpd
= 6;
2951 adev
->mode_info
.num_dig
= 6;
2956 adev
->mode_info
.num_crtc
= 6;
2957 adev
->mode_info
.num_hpd
= 6;
2958 adev
->mode_info
.num_dig
= 6;
2960 #if defined(CONFIG_DRM_AMD_DC_DCN)
2962 adev
->mode_info
.num_crtc
= 4;
2963 adev
->mode_info
.num_hpd
= 4;
2964 adev
->mode_info
.num_dig
= 4;
2969 adev
->mode_info
.num_crtc
= 6;
2970 adev
->mode_info
.num_hpd
= 6;
2971 adev
->mode_info
.num_dig
= 6;
2974 adev
->mode_info
.num_crtc
= 5;
2975 adev
->mode_info
.num_hpd
= 5;
2976 adev
->mode_info
.num_dig
= 5;
2979 adev
->mode_info
.num_crtc
= 4;
2980 adev
->mode_info
.num_hpd
= 4;
2981 adev
->mode_info
.num_dig
= 4;
2984 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
2988 amdgpu_dm_set_irq_funcs(adev
);
2990 if (adev
->mode_info
.funcs
== NULL
)
2991 adev
->mode_info
.funcs
= &dm_display_funcs
;
2994 * Note: Do NOT change adev->audio_endpt_rreg and
2995 * adev->audio_endpt_wreg because they are initialised in
2996 * amdgpu_device_init()
2998 #if defined(CONFIG_DEBUG_KERNEL_DC)
3001 &dev_attr_s3_debug
);
3007 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
3008 struct dc_stream_state
*new_stream
,
3009 struct dc_stream_state
*old_stream
)
3011 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
3014 if (!crtc_state
->enable
)
3017 return crtc_state
->active
;
3020 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
3022 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
3025 return !crtc_state
->enable
|| !crtc_state
->active
;
/* Encoder destroy callback: clean up DRM state and free the encoder. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3034 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
3035 .destroy
= amdgpu_dm_encoder_destroy
,
3039 static int fill_dc_scaling_info(const struct drm_plane_state
*state
,
3040 struct dc_scaling_info
*scaling_info
)
3042 int scale_w
, scale_h
;
3044 memset(scaling_info
, 0, sizeof(*scaling_info
));
3046 /* Source is fixed 16.16 but we ignore mantissa for now... */
3047 scaling_info
->src_rect
.x
= state
->src_x
>> 16;
3048 scaling_info
->src_rect
.y
= state
->src_y
>> 16;
3050 scaling_info
->src_rect
.width
= state
->src_w
>> 16;
3051 if (scaling_info
->src_rect
.width
== 0)
3054 scaling_info
->src_rect
.height
= state
->src_h
>> 16;
3055 if (scaling_info
->src_rect
.height
== 0)
3058 scaling_info
->dst_rect
.x
= state
->crtc_x
;
3059 scaling_info
->dst_rect
.y
= state
->crtc_y
;
3061 if (state
->crtc_w
== 0)
3064 scaling_info
->dst_rect
.width
= state
->crtc_w
;
3066 if (state
->crtc_h
== 0)
3069 scaling_info
->dst_rect
.height
= state
->crtc_h
;
3071 /* DRM doesn't specify clipping on destination output. */
3072 scaling_info
->clip_rect
= scaling_info
->dst_rect
;
3074 /* TODO: Validate scaling per-format with DC plane caps */
3075 scale_w
= scaling_info
->dst_rect
.width
* 1000 /
3076 scaling_info
->src_rect
.width
;
3078 if (scale_w
< 250 || scale_w
> 16000)
3081 scale_h
= scaling_info
->dst_rect
.height
* 1000 /
3082 scaling_info
->src_rect
.height
;
3084 if (scale_h
< 250 || scale_h
> 16000)
3088 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3089 * assume reasonable defaults based on the format.
3095 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
3096 uint64_t *tiling_flags
)
3098 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->base
.obj
[0]);
3099 int r
= amdgpu_bo_reserve(rbo
, false);
3102 /* Don't show error message when returning -ERESTARTSYS */
3103 if (r
!= -ERESTARTSYS
)
3104 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
3109 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
3111 amdgpu_bo_unreserve(rbo
);
3116 static inline uint64_t get_dcc_address(uint64_t address
, uint64_t tiling_flags
)
3118 uint32_t offset
= AMDGPU_TILING_GET(tiling_flags
, DCC_OFFSET_256B
);
3120 return offset
? (address
+ offset
* 256) : 0;
3124 fill_plane_dcc_attributes(struct amdgpu_device
*adev
,
3125 const struct amdgpu_framebuffer
*afb
,
3126 const enum surface_pixel_format format
,
3127 const enum dc_rotation_angle rotation
,
3128 const struct plane_size
*plane_size
,
3129 const union dc_tiling_info
*tiling_info
,
3130 const uint64_t info
,
3131 struct dc_plane_dcc_param
*dcc
,
3132 struct dc_plane_address
*address
)
3134 struct dc
*dc
= adev
->dm
.dc
;
3135 struct dc_dcc_surface_param input
;
3136 struct dc_surface_dcc_cap output
;
3137 uint32_t offset
= AMDGPU_TILING_GET(info
, DCC_OFFSET_256B
);
3138 uint32_t i64b
= AMDGPU_TILING_GET(info
, DCC_INDEPENDENT_64B
) != 0;
3139 uint64_t dcc_address
;
3141 memset(&input
, 0, sizeof(input
));
3142 memset(&output
, 0, sizeof(output
));
3147 if (format
>= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
)
3150 if (!dc
->cap_funcs
.get_dcc_compression_cap
)
3153 input
.format
= format
;
3154 input
.surface_size
.width
= plane_size
->surface_size
.width
;
3155 input
.surface_size
.height
= plane_size
->surface_size
.height
;
3156 input
.swizzle_mode
= tiling_info
->gfx9
.swizzle
;
3158 if (rotation
== ROTATION_ANGLE_0
|| rotation
== ROTATION_ANGLE_180
)
3159 input
.scan
= SCAN_DIRECTION_HORIZONTAL
;
3160 else if (rotation
== ROTATION_ANGLE_90
|| rotation
== ROTATION_ANGLE_270
)
3161 input
.scan
= SCAN_DIRECTION_VERTICAL
;
3163 if (!dc
->cap_funcs
.get_dcc_compression_cap(dc
, &input
, &output
))
3166 if (!output
.capable
)
3169 if (i64b
== 0 && output
.grph
.rgb
.independent_64b_blks
!= 0)
3174 AMDGPU_TILING_GET(info
, DCC_PITCH_MAX
) + 1;
3175 dcc
->independent_64b_blks
= i64b
;
3177 dcc_address
= get_dcc_address(afb
->address
, info
);
3178 address
->grph
.meta_addr
.low_part
= lower_32_bits(dcc_address
);
3179 address
->grph
.meta_addr
.high_part
= upper_32_bits(dcc_address
);
3185 fill_plane_buffer_attributes(struct amdgpu_device
*adev
,
3186 const struct amdgpu_framebuffer
*afb
,
3187 const enum surface_pixel_format format
,
3188 const enum dc_rotation_angle rotation
,
3189 const uint64_t tiling_flags
,
3190 union dc_tiling_info
*tiling_info
,
3191 struct plane_size
*plane_size
,
3192 struct dc_plane_dcc_param
*dcc
,
3193 struct dc_plane_address
*address
)
3195 const struct drm_framebuffer
*fb
= &afb
->base
;
3198 memset(tiling_info
, 0, sizeof(*tiling_info
));
3199 memset(plane_size
, 0, sizeof(*plane_size
));
3200 memset(dcc
, 0, sizeof(*dcc
));
3201 memset(address
, 0, sizeof(*address
));
3203 if (format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
3204 plane_size
->surface_size
.x
= 0;
3205 plane_size
->surface_size
.y
= 0;
3206 plane_size
->surface_size
.width
= fb
->width
;
3207 plane_size
->surface_size
.height
= fb
->height
;
3208 plane_size
->surface_pitch
=
3209 fb
->pitches
[0] / fb
->format
->cpp
[0];
3211 address
->type
= PLN_ADDR_TYPE_GRAPHICS
;
3212 address
->grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3213 address
->grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3214 } else if (format
< SURFACE_PIXEL_FORMAT_INVALID
) {
3215 uint64_t chroma_addr
= afb
->address
+ fb
->offsets
[1];
3217 plane_size
->surface_size
.x
= 0;
3218 plane_size
->surface_size
.y
= 0;
3219 plane_size
->surface_size
.width
= fb
->width
;
3220 plane_size
->surface_size
.height
= fb
->height
;
3221 plane_size
->surface_pitch
=
3222 fb
->pitches
[0] / fb
->format
->cpp
[0];
3224 plane_size
->chroma_size
.x
= 0;
3225 plane_size
->chroma_size
.y
= 0;
3226 /* TODO: set these based on surface format */
3227 plane_size
->chroma_size
.width
= fb
->width
/ 2;
3228 plane_size
->chroma_size
.height
= fb
->height
/ 2;
3230 plane_size
->chroma_pitch
=
3231 fb
->pitches
[1] / fb
->format
->cpp
[1];
3233 address
->type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
3234 address
->video_progressive
.luma_addr
.low_part
=
3235 lower_32_bits(afb
->address
);
3236 address
->video_progressive
.luma_addr
.high_part
=
3237 upper_32_bits(afb
->address
);
3238 address
->video_progressive
.chroma_addr
.low_part
=
3239 lower_32_bits(chroma_addr
);
3240 address
->video_progressive
.chroma_addr
.high_part
=
3241 upper_32_bits(chroma_addr
);
3244 /* Fill GFX8 params */
3245 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
3246 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
3248 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
3249 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
3250 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
3251 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
3252 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
3254 /* XXX fix me for VI */
3255 tiling_info
->gfx8
.num_banks
= num_banks
;
3256 tiling_info
->gfx8
.array_mode
=
3257 DC_ARRAY_2D_TILED_THIN1
;
3258 tiling_info
->gfx8
.tile_split
= tile_split
;
3259 tiling_info
->gfx8
.bank_width
= bankw
;
3260 tiling_info
->gfx8
.bank_height
= bankh
;
3261 tiling_info
->gfx8
.tile_aspect
= mtaspect
;
3262 tiling_info
->gfx8
.tile_mode
=
3263 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
3264 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
3265 == DC_ARRAY_1D_TILED_THIN1
) {
3266 tiling_info
->gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
3269 tiling_info
->gfx8
.pipe_config
=
3270 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
3272 if (adev
->asic_type
== CHIP_VEGA10
||
3273 adev
->asic_type
== CHIP_VEGA12
||
3274 adev
->asic_type
== CHIP_VEGA20
||
3275 adev
->asic_type
== CHIP_NAVI10
||
3276 adev
->asic_type
== CHIP_NAVI14
||
3277 adev
->asic_type
== CHIP_NAVI12
||
3278 adev
->asic_type
== CHIP_RENOIR
||
3279 adev
->asic_type
== CHIP_RAVEN
) {
3280 /* Fill GFX9 params */
3281 tiling_info
->gfx9
.num_pipes
=
3282 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
3283 tiling_info
->gfx9
.num_banks
=
3284 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
3285 tiling_info
->gfx9
.pipe_interleave
=
3286 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
3287 tiling_info
->gfx9
.num_shader_engines
=
3288 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
3289 tiling_info
->gfx9
.max_compressed_frags
=
3290 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
3291 tiling_info
->gfx9
.num_rb_per_se
=
3292 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
3293 tiling_info
->gfx9
.swizzle
=
3294 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
3295 tiling_info
->gfx9
.shaderEnable
= 1;
3297 ret
= fill_plane_dcc_attributes(adev
, afb
, format
, rotation
,
3298 plane_size
, tiling_info
,
3299 tiling_flags
, dcc
, address
);
3308 fill_blending_from_plane_state(const struct drm_plane_state
*plane_state
,
3309 bool *per_pixel_alpha
, bool *global_alpha
,
3310 int *global_alpha_value
)
3312 *per_pixel_alpha
= false;
3313 *global_alpha
= false;
3314 *global_alpha_value
= 0xff;
3316 if (plane_state
->plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
3319 if (plane_state
->pixel_blend_mode
== DRM_MODE_BLEND_PREMULTI
) {
3320 static const uint32_t alpha_formats
[] = {
3321 DRM_FORMAT_ARGB8888
,
3322 DRM_FORMAT_RGBA8888
,
3323 DRM_FORMAT_ABGR8888
,
3325 uint32_t format
= plane_state
->fb
->format
->format
;
3328 for (i
= 0; i
< ARRAY_SIZE(alpha_formats
); ++i
) {
3329 if (format
== alpha_formats
[i
]) {
3330 *per_pixel_alpha
= true;
3336 if (plane_state
->alpha
< 0xffff) {
3337 *global_alpha
= true;
3338 *global_alpha_value
= plane_state
->alpha
>> 8;
3343 fill_plane_color_attributes(const struct drm_plane_state
*plane_state
,
3344 const enum surface_pixel_format format
,
3345 enum dc_color_space
*color_space
)
3349 *color_space
= COLOR_SPACE_SRGB
;
3351 /* DRM color properties only affect non-RGB formats. */
3352 if (format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
)
3355 full_range
= (plane_state
->color_range
== DRM_COLOR_YCBCR_FULL_RANGE
);
3357 switch (plane_state
->color_encoding
) {
3358 case DRM_COLOR_YCBCR_BT601
:
3360 *color_space
= COLOR_SPACE_YCBCR601
;
3362 *color_space
= COLOR_SPACE_YCBCR601_LIMITED
;
3365 case DRM_COLOR_YCBCR_BT709
:
3367 *color_space
= COLOR_SPACE_YCBCR709
;
3369 *color_space
= COLOR_SPACE_YCBCR709_LIMITED
;
3372 case DRM_COLOR_YCBCR_BT2020
:
3374 *color_space
= COLOR_SPACE_2020_YCBCR
;
3387 fill_dc_plane_info_and_addr(struct amdgpu_device
*adev
,
3388 const struct drm_plane_state
*plane_state
,
3389 const uint64_t tiling_flags
,
3390 struct dc_plane_info
*plane_info
,
3391 struct dc_plane_address
*address
)
3393 const struct drm_framebuffer
*fb
= plane_state
->fb
;
3394 const struct amdgpu_framebuffer
*afb
=
3395 to_amdgpu_framebuffer(plane_state
->fb
);
3396 struct drm_format_name_buf format_name
;
3399 memset(plane_info
, 0, sizeof(*plane_info
));
3401 switch (fb
->format
->format
) {
3403 plane_info
->format
=
3404 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
3406 case DRM_FORMAT_RGB565
:
3407 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
3409 case DRM_FORMAT_XRGB8888
:
3410 case DRM_FORMAT_ARGB8888
:
3411 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
3413 case DRM_FORMAT_XRGB2101010
:
3414 case DRM_FORMAT_ARGB2101010
:
3415 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
3417 case DRM_FORMAT_XBGR2101010
:
3418 case DRM_FORMAT_ABGR2101010
:
3419 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
3421 case DRM_FORMAT_XBGR8888
:
3422 case DRM_FORMAT_ABGR8888
:
3423 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
;
3425 case DRM_FORMAT_NV21
:
3426 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
3428 case DRM_FORMAT_NV12
:
3429 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
3433 "Unsupported screen format %s\n",
3434 drm_get_format_name(fb
->format
->format
, &format_name
));
3438 switch (plane_state
->rotation
& DRM_MODE_ROTATE_MASK
) {
3439 case DRM_MODE_ROTATE_0
:
3440 plane_info
->rotation
= ROTATION_ANGLE_0
;
3442 case DRM_MODE_ROTATE_90
:
3443 plane_info
->rotation
= ROTATION_ANGLE_90
;
3445 case DRM_MODE_ROTATE_180
:
3446 plane_info
->rotation
= ROTATION_ANGLE_180
;
3448 case DRM_MODE_ROTATE_270
:
3449 plane_info
->rotation
= ROTATION_ANGLE_270
;
3452 plane_info
->rotation
= ROTATION_ANGLE_0
;
3456 plane_info
->visible
= true;
3457 plane_info
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
3459 plane_info
->layer_index
= 0;
3461 ret
= fill_plane_color_attributes(plane_state
, plane_info
->format
,
3462 &plane_info
->color_space
);
3466 ret
= fill_plane_buffer_attributes(adev
, afb
, plane_info
->format
,
3467 plane_info
->rotation
, tiling_flags
,
3468 &plane_info
->tiling_info
,
3469 &plane_info
->plane_size
,
3470 &plane_info
->dcc
, address
);
3474 fill_blending_from_plane_state(
3475 plane_state
, &plane_info
->per_pixel_alpha
,
3476 &plane_info
->global_alpha
, &plane_info
->global_alpha_value
);
3481 static int fill_dc_plane_attributes(struct amdgpu_device
*adev
,
3482 struct dc_plane_state
*dc_plane_state
,
3483 struct drm_plane_state
*plane_state
,
3484 struct drm_crtc_state
*crtc_state
)
3486 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(crtc_state
);
3487 const struct amdgpu_framebuffer
*amdgpu_fb
=
3488 to_amdgpu_framebuffer(plane_state
->fb
);
3489 struct dc_scaling_info scaling_info
;
3490 struct dc_plane_info plane_info
;
3491 uint64_t tiling_flags
;
3494 ret
= fill_dc_scaling_info(plane_state
, &scaling_info
);
3498 dc_plane_state
->src_rect
= scaling_info
.src_rect
;
3499 dc_plane_state
->dst_rect
= scaling_info
.dst_rect
;
3500 dc_plane_state
->clip_rect
= scaling_info
.clip_rect
;
3501 dc_plane_state
->scaling_quality
= scaling_info
.scaling_quality
;
3503 ret
= get_fb_info(amdgpu_fb
, &tiling_flags
);
3507 ret
= fill_dc_plane_info_and_addr(adev
, plane_state
, tiling_flags
,
3509 &dc_plane_state
->address
);
3513 dc_plane_state
->format
= plane_info
.format
;
3514 dc_plane_state
->color_space
= plane_info
.color_space
;
3515 dc_plane_state
->format
= plane_info
.format
;
3516 dc_plane_state
->plane_size
= plane_info
.plane_size
;
3517 dc_plane_state
->rotation
= plane_info
.rotation
;
3518 dc_plane_state
->horizontal_mirror
= plane_info
.horizontal_mirror
;
3519 dc_plane_state
->stereo_format
= plane_info
.stereo_format
;
3520 dc_plane_state
->tiling_info
= plane_info
.tiling_info
;
3521 dc_plane_state
->visible
= plane_info
.visible
;
3522 dc_plane_state
->per_pixel_alpha
= plane_info
.per_pixel_alpha
;
3523 dc_plane_state
->global_alpha
= plane_info
.global_alpha
;
3524 dc_plane_state
->global_alpha_value
= plane_info
.global_alpha_value
;
3525 dc_plane_state
->dcc
= plane_info
.dcc
;
3526 dc_plane_state
->layer_index
= plane_info
.layer_index
; // Always returns 0
3529 * Always set input transfer function, since plane state is refreshed
3532 ret
= amdgpu_dm_update_plane_color_mgmt(dm_crtc_state
, dc_plane_state
);
3539 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
3540 const struct dm_connector_state
*dm_state
,
3541 struct dc_stream_state
*stream
)
3543 enum amdgpu_rmx_type rmx_type
;
3545 struct rect src
= { 0 }; /* viewport in composition space*/
3546 struct rect dst
= { 0 }; /* stream addressable area */
3548 /* no mode. nothing to be done */
3552 /* Full screen scaling by default */
3553 src
.width
= mode
->hdisplay
;
3554 src
.height
= mode
->vdisplay
;
3555 dst
.width
= stream
->timing
.h_addressable
;
3556 dst
.height
= stream
->timing
.v_addressable
;
3559 rmx_type
= dm_state
->scaling
;
3560 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
3561 if (src
.width
* dst
.height
<
3562 src
.height
* dst
.width
) {
3563 /* height needs less upscaling/more downscaling */
3564 dst
.width
= src
.width
*
3565 dst
.height
/ src
.height
;
3567 /* width needs less upscaling/more downscaling */
3568 dst
.height
= src
.height
*
3569 dst
.width
/ src
.width
;
3571 } else if (rmx_type
== RMX_CENTER
) {
3575 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
3576 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
3578 if (dm_state
->underscan_enable
) {
3579 dst
.x
+= dm_state
->underscan_hborder
/ 2;
3580 dst
.y
+= dm_state
->underscan_vborder
/ 2;
3581 dst
.width
-= dm_state
->underscan_hborder
;
3582 dst
.height
-= dm_state
->underscan_vborder
;
3589 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3590 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
3594 static enum dc_color_depth
3595 convert_color_depth_from_display_info(const struct drm_connector
*connector
,
3596 const struct drm_connector_state
*state
,
3604 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3605 if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_48
)
3607 else if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_36
)
3609 else if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_30
)
3612 bpc
= (uint8_t)connector
->display_info
.bpc
;
3613 /* Assume 8 bpc by default if no bpc is specified. */
3614 bpc
= bpc
? bpc
: 8;
3618 state
= connector
->state
;
3622 * Cap display bpc based on the user requested value.
3624 * The value for state->max_bpc may not correctly updated
3625 * depending on when the connector gets added to the state
3626 * or if this was called outside of atomic check, so it
3627 * can't be used directly.
3629 bpc
= min(bpc
, state
->max_requested_bpc
);
3631 /* Round down to the nearest even number. */
3632 bpc
= bpc
- (bpc
& 1);
3638 * Temporary Work around, DRM doesn't parse color depth for
3639 * EDID revision before 1.4
3640 * TODO: Fix edid parsing
3642 return COLOR_DEPTH_888
;
3644 return COLOR_DEPTH_666
;
3646 return COLOR_DEPTH_888
;
3648 return COLOR_DEPTH_101010
;
3650 return COLOR_DEPTH_121212
;
3652 return COLOR_DEPTH_141414
;
3654 return COLOR_DEPTH_161616
;
3656 return COLOR_DEPTH_UNDEFINED
;
3660 static enum dc_aspect_ratio
3661 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
3663 /* 1-1 mapping, since both enums follow the HDMI spec. */
3664 return (enum dc_aspect_ratio
) mode_in
->picture_aspect_ratio
;
3667 static enum dc_color_space
3668 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
3670 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
3672 switch (dc_crtc_timing
->pixel_encoding
) {
3673 case PIXEL_ENCODING_YCBCR422
:
3674 case PIXEL_ENCODING_YCBCR444
:
3675 case PIXEL_ENCODING_YCBCR420
:
3678 * 27030khz is the separation point between HDTV and SDTV
3679 * according to HDMI spec, we use YCbCr709 and YCbCr601
3682 if (dc_crtc_timing
->pix_clk_100hz
> 270300) {
3683 if (dc_crtc_timing
->flags
.Y_ONLY
)
3685 COLOR_SPACE_YCBCR709_LIMITED
;
3687 color_space
= COLOR_SPACE_YCBCR709
;
3689 if (dc_crtc_timing
->flags
.Y_ONLY
)
3691 COLOR_SPACE_YCBCR601_LIMITED
;
3693 color_space
= COLOR_SPACE_YCBCR601
;
3698 case PIXEL_ENCODING_RGB
:
3699 color_space
= COLOR_SPACE_SRGB
;
3710 static bool adjust_colour_depth_from_display_info(
3711 struct dc_crtc_timing
*timing_out
,
3712 const struct drm_display_info
*info
)
3714 enum dc_color_depth depth
= timing_out
->display_color_depth
;
3717 normalized_clk
= timing_out
->pix_clk_100hz
/ 10;
3718 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3719 if (timing_out
->pixel_encoding
== PIXEL_ENCODING_YCBCR420
)
3720 normalized_clk
/= 2;
3721 /* Adjusting pix clock following on HDMI spec based on colour depth */
3723 case COLOR_DEPTH_888
:
3725 case COLOR_DEPTH_101010
:
3726 normalized_clk
= (normalized_clk
* 30) / 24;
3728 case COLOR_DEPTH_121212
:
3729 normalized_clk
= (normalized_clk
* 36) / 24;
3731 case COLOR_DEPTH_161616
:
3732 normalized_clk
= (normalized_clk
* 48) / 24;
3735 /* The above depths are the only ones valid for HDMI. */
3738 if (normalized_clk
<= info
->max_tmds_clock
) {
3739 timing_out
->display_color_depth
= depth
;
3742 } while (--depth
> COLOR_DEPTH_666
);
3746 static void fill_stream_properties_from_drm_display_mode(
3747 struct dc_stream_state
*stream
,
3748 const struct drm_display_mode
*mode_in
,
3749 const struct drm_connector
*connector
,
3750 const struct drm_connector_state
*connector_state
,
3751 const struct dc_stream_state
*old_stream
)
3753 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
3754 const struct drm_display_info
*info
= &connector
->display_info
;
3755 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
3756 struct hdmi_vendor_infoframe hv_frame
;
3757 struct hdmi_avi_infoframe avi_frame
;
3759 memset(&hv_frame
, 0, sizeof(hv_frame
));
3760 memset(&avi_frame
, 0, sizeof(avi_frame
));
3762 timing_out
->h_border_left
= 0;
3763 timing_out
->h_border_right
= 0;
3764 timing_out
->v_border_top
= 0;
3765 timing_out
->v_border_bottom
= 0;
3766 /* TODO: un-hardcode */
3767 if (drm_mode_is_420_only(info
, mode_in
)
3768 && stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
3769 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
3770 else if (drm_mode_is_420_also(info
, mode_in
)
3771 && aconnector
->force_yuv420_output
)
3772 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
3773 else if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
3774 && stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
3775 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
3777 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
3779 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
3780 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
3781 connector
, connector_state
,
3782 (timing_out
->pixel_encoding
== PIXEL_ENCODING_YCBCR420
));
3783 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
3784 timing_out
->hdmi_vic
= 0;
3787 timing_out
->vic
= old_stream
->timing
.vic
;
3788 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= old_stream
->timing
.flags
.HSYNC_POSITIVE_POLARITY
;
3789 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= old_stream
->timing
.flags
.VSYNC_POSITIVE_POLARITY
;
3791 timing_out
->vic
= drm_match_cea_mode(mode_in
);
3792 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
3793 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
3794 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
3795 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
3798 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
) {
3799 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame
, (struct drm_connector
*)connector
, mode_in
);
3800 timing_out
->vic
= avi_frame
.video_code
;
3801 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame
, (struct drm_connector
*)connector
, mode_in
);
3802 timing_out
->hdmi_vic
= hv_frame
.vic
;
3805 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
3806 timing_out
->h_total
= mode_in
->crtc_htotal
;
3807 timing_out
->h_sync_width
=
3808 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
3809 timing_out
->h_front_porch
=
3810 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
3811 timing_out
->v_total
= mode_in
->crtc_vtotal
;
3812 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
3813 timing_out
->v_front_porch
=
3814 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
3815 timing_out
->v_sync_width
=
3816 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
3817 timing_out
->pix_clk_100hz
= mode_in
->crtc_clock
* 10;
3818 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
3820 stream
->output_color_space
= get_output_color_space(timing_out
);
3822 stream
->out_transfer_func
->type
= TF_TYPE_PREDEFINED
;
3823 stream
->out_transfer_func
->tf
= TRANSFER_FUNCTION_SRGB
;
3824 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
) {
3825 if (!adjust_colour_depth_from_display_info(timing_out
, info
) &&
3826 drm_mode_is_420_also(info
, mode_in
) &&
3827 timing_out
->pixel_encoding
!= PIXEL_ENCODING_YCBCR420
) {
3828 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
3829 adjust_colour_depth_from_display_info(timing_out
, info
);
3834 static void fill_audio_info(struct audio_info
*audio_info
,
3835 const struct drm_connector
*drm_connector
,
3836 const struct dc_sink
*dc_sink
)
3839 int cea_revision
= 0;
3840 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
3842 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
3843 audio_info
->product_id
= edid_caps
->product_id
;
3845 cea_revision
= drm_connector
->display_info
.cea_rev
;
3847 strscpy(audio_info
->display_name
,
3848 edid_caps
->display_name
,
3849 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
);
3851 if (cea_revision
>= 3) {
3852 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
3854 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
3855 audio_info
->modes
[i
].format_code
=
3856 (enum audio_format_code
)
3857 (edid_caps
->audio_modes
[i
].format_code
);
3858 audio_info
->modes
[i
].channel_count
=
3859 edid_caps
->audio_modes
[i
].channel_count
;
3860 audio_info
->modes
[i
].sample_rates
.all
=
3861 edid_caps
->audio_modes
[i
].sample_rate
;
3862 audio_info
->modes
[i
].sample_size
=
3863 edid_caps
->audio_modes
[i
].sample_size
;
3867 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
3869 /* TODO: We only check for the progressive mode, check for interlace mode too */
3870 if (drm_connector
->latency_present
[0]) {
3871 audio_info
->video_latency
= drm_connector
->video_latency
[0];
3872 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
3875 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3880 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode
*src_mode
,
3881 struct drm_display_mode
*dst_mode
)
3883 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
3884 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
3885 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
3886 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
3887 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
3888 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
3889 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
3890 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
3891 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
3892 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
3893 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
3894 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
3895 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
3896 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
3900 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
3901 const struct drm_display_mode
*native_mode
,
3904 if (scale_enabled
) {
3905 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
3906 } else if (native_mode
->clock
== drm_mode
->clock
&&
3907 native_mode
->htotal
== drm_mode
->htotal
&&
3908 native_mode
->vtotal
== drm_mode
->vtotal
) {
3909 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
3911 /* no scaling nor amdgpu inserted, no need to patch */
3915 static struct dc_sink
*
3916 create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
3918 struct dc_sink_init_data sink_init_data
= { 0 };
3919 struct dc_sink
*sink
= NULL
;
3920 sink_init_data
.link
= aconnector
->dc_link
;
3921 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
3923 sink
= dc_sink_create(&sink_init_data
);
3925 DRM_ERROR("Failed to create sink!\n");
3928 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
3933 static void set_multisync_trigger_params(
3934 struct dc_stream_state
*stream
)
3936 if (stream
->triggered_crtc_reset
.enabled
) {
3937 stream
->triggered_crtc_reset
.event
= CRTC_EVENT_VSYNC_RISING
;
3938 stream
->triggered_crtc_reset
.delay
= TRIGGER_DELAY_NEXT_LINE
;
3942 static void set_master_stream(struct dc_stream_state
*stream_set
[],
3945 int j
, highest_rfr
= 0, master_stream
= 0;
3947 for (j
= 0; j
< stream_count
; j
++) {
3948 if (stream_set
[j
] && stream_set
[j
]->triggered_crtc_reset
.enabled
) {
3949 int refresh_rate
= 0;
3951 refresh_rate
= (stream_set
[j
]->timing
.pix_clk_100hz
*100)/
3952 (stream_set
[j
]->timing
.h_total
*stream_set
[j
]->timing
.v_total
);
3953 if (refresh_rate
> highest_rfr
) {
3954 highest_rfr
= refresh_rate
;
3959 for (j
= 0; j
< stream_count
; j
++) {
3961 stream_set
[j
]->triggered_crtc_reset
.event_source
= stream_set
[master_stream
];
3965 static void dm_enable_per_frame_crtc_master_sync(struct dc_state
*context
)
3969 if (context
->stream_count
< 2)
3971 for (i
= 0; i
< context
->stream_count
; i
++) {
3972 if (!context
->streams
[i
])
3975 * TODO: add a function to read AMD VSDB bits and set
3976 * crtc_sync_master.multi_sync_enabled flag
3977 * For now it's set to false
3979 set_multisync_trigger_params(context
->streams
[i
]);
3981 set_master_stream(context
->streams
, context
->stream_count
);
3984 static struct dc_stream_state
*
3985 create_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
3986 const struct drm_display_mode
*drm_mode
,
3987 const struct dm_connector_state
*dm_state
,
3988 const struct dc_stream_state
*old_stream
)
3990 struct drm_display_mode
*preferred_mode
= NULL
;
3991 struct drm_connector
*drm_connector
;
3992 const struct drm_connector_state
*con_state
=
3993 dm_state
? &dm_state
->base
: NULL
;
3994 struct dc_stream_state
*stream
= NULL
;
3995 struct drm_display_mode mode
= *drm_mode
;
3996 bool native_mode_found
= false;
3997 bool scale
= dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false;
3999 int preferred_refresh
= 0;
4000 #if defined(CONFIG_DRM_AMD_DC_DCN)
4001 struct dsc_dec_dpcd_caps dsc_caps
;
4003 uint32_t link_bandwidth_kbps
;
4005 struct dc_sink
*sink
= NULL
;
4006 if (aconnector
== NULL
) {
4007 DRM_ERROR("aconnector is NULL!\n");
4011 drm_connector
= &aconnector
->base
;
4013 if (!aconnector
->dc_sink
) {
4014 sink
= create_fake_sink(aconnector
);
4018 sink
= aconnector
->dc_sink
;
4019 dc_sink_retain(sink
);
4022 stream
= dc_create_stream_for_sink(sink
);
4024 if (stream
== NULL
) {
4025 DRM_ERROR("Failed to create stream for sink!\n");
4029 stream
->dm_stream_context
= aconnector
;
4031 stream
->timing
.flags
.LTE_340MCSC_SCRAMBLE
=
4032 drm_connector
->display_info
.hdmi
.scdc
.scrambling
.low_rates
;
4034 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
4035 /* Search for preferred mode */
4036 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
4037 native_mode_found
= true;
4041 if (!native_mode_found
)
4042 preferred_mode
= list_first_entry_or_null(
4043 &aconnector
->base
.modes
,
4044 struct drm_display_mode
,
4047 mode_refresh
= drm_mode_vrefresh(&mode
);
4049 if (preferred_mode
== NULL
) {
4051 * This may not be an error, the use case is when we have no
4052 * usermode calls to reset and set mode upon hotplug. In this
4053 * case, we call set mode ourselves to restore the previous mode
4054 * and the modelist may not be filled in in time.
4056 DRM_DEBUG_DRIVER("No preferred mode found\n");
4058 decide_crtc_timing_for_drm_display_mode(
4059 &mode
, preferred_mode
,
4060 dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false);
4061 preferred_refresh
= drm_mode_vrefresh(preferred_mode
);
4065 drm_mode_set_crtcinfo(&mode
, 0);
4068 * If scaling is enabled and refresh rate didn't change
4069 * we copy the vic and polarities of the old timings
4071 if (!scale
|| mode_refresh
!= preferred_refresh
)
4072 fill_stream_properties_from_drm_display_mode(stream
,
4073 &mode
, &aconnector
->base
, con_state
, NULL
);
4075 fill_stream_properties_from_drm_display_mode(stream
,
4076 &mode
, &aconnector
->base
, con_state
, old_stream
);
4078 stream
->timing
.flags
.DSC
= 0;
4080 if (aconnector
->dc_link
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
4081 #if defined(CONFIG_DRM_AMD_DC_DCN)
4082 dc_dsc_parse_dsc_dpcd(aconnector
->dc_link
->ctx
->dc
,
4083 aconnector
->dc_link
->dpcd_caps
.dsc_caps
.dsc_basic_caps
.raw
,
4084 aconnector
->dc_link
->dpcd_caps
.dsc_caps
.dsc_ext_caps
.raw
,
4087 link_bandwidth_kbps
= dc_link_bandwidth_kbps(aconnector
->dc_link
,
4088 dc_link_get_link_cap(aconnector
->dc_link
));
4090 #if defined(CONFIG_DRM_AMD_DC_DCN)
4091 if (dsc_caps
.is_dsc_supported
)
4092 if (dc_dsc_compute_config(aconnector
->dc_link
->ctx
->dc
->res_pool
->dscs
[0],
4094 aconnector
->dc_link
->ctx
->dc
->debug
.dsc_min_slice_height_override
,
4095 link_bandwidth_kbps
,
4097 &stream
->timing
.dsc_cfg
))
4098 stream
->timing
.flags
.DSC
= 1;
4102 update_stream_scaling_settings(&mode
, dm_state
, stream
);
4105 &stream
->audio_info
,
4109 update_stream_signal(stream
, sink
);
4111 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
4112 mod_build_hf_vsif_infopacket(stream
, &stream
->vsp_infopacket
, false, false);
4113 if (stream
->link
->psr_feature_enabled
) {
4114 struct dc
*core_dc
= stream
->link
->ctx
->dc
;
4116 if (dc_is_dmcu_initialized(core_dc
)) {
4117 struct dmcu
*dmcu
= core_dc
->res_pool
->dmcu
;
4119 stream
->psr_version
= dmcu
->dmcu_version
.psr_version
;
4120 mod_build_vsc_infopacket(stream
,
4121 &stream
->vsc_infopacket
,
4122 &stream
->use_vsc_sdp_for_colorimetry
);
4126 dc_sink_release(sink
);
/* DRM .destroy hook: tear down and free the CRTC object. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
4137 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
4138 struct drm_crtc_state
*state
)
4140 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
4142 /* TODO Destroy dc_stream objects are stream object is flattened */
4144 dc_stream_release(cur
->stream
);
4147 __drm_atomic_helper_crtc_destroy_state(state
);
4153 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
4155 struct dm_crtc_state
*state
;
4158 dm_crtc_destroy_state(crtc
, crtc
->state
);
4160 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
4161 if (WARN_ON(!state
))
4164 crtc
->state
= &state
->base
;
4165 crtc
->state
->crtc
= crtc
;
4169 static struct drm_crtc_state
*
4170 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
4172 struct dm_crtc_state
*state
, *cur
;
4174 cur
= to_dm_crtc_state(crtc
->state
);
4176 if (WARN_ON(!crtc
->state
))
4179 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
4183 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
4186 state
->stream
= cur
->stream
;
4187 dc_stream_retain(state
->stream
);
4190 state
->active_planes
= cur
->active_planes
;
4191 state
->interrupts_enabled
= cur
->interrupts_enabled
;
4192 state
->vrr_params
= cur
->vrr_params
;
4193 state
->vrr_infopacket
= cur
->vrr_infopacket
;
4194 state
->abm_level
= cur
->abm_level
;
4195 state
->vrr_supported
= cur
->vrr_supported
;
4196 state
->freesync_config
= cur
->freesync_config
;
4197 state
->crc_src
= cur
->crc_src
;
4198 state
->cm_has_degamma
= cur
->cm_has_degamma
;
4199 state
->cm_is_degamma_srgb
= cur
->cm_is_degamma_srgb
;
4201 /* TODO Duplicate dc_stream after objects are stream object is flattened */
4203 return &state
->base
;
4206 static inline int dm_set_vupdate_irq(struct drm_crtc
*crtc
, bool enable
)
4208 enum dc_irq_source irq_source
;
4209 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4210 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
4213 /* Do not set vupdate for DCN hardware */
4214 if (adev
->family
> AMDGPU_FAMILY_AI
)
4217 irq_source
= IRQ_TYPE_VUPDATE
+ acrtc
->otg_inst
;
4219 rc
= dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
4221 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4222 acrtc
->crtc_id
, enable
? "en" : "dis", rc
);
4226 static inline int dm_set_vblank(struct drm_crtc
*crtc
, bool enable
)
4228 enum dc_irq_source irq_source
;
4229 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4230 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
4231 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
4235 /* vblank irq on -> Only need vupdate irq in vrr mode */
4236 if (amdgpu_dm_vrr_active(acrtc_state
))
4237 rc
= dm_set_vupdate_irq(crtc
, true);
4239 /* vblank irq off -> vupdate irq off */
4240 rc
= dm_set_vupdate_irq(crtc
, false);
4246 irq_source
= IRQ_TYPE_VBLANK
+ acrtc
->otg_inst
;
4247 return dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
4250 static int dm_enable_vblank(struct drm_crtc
*crtc
)
4252 return dm_set_vblank(crtc
, true);
4255 static void dm_disable_vblank(struct drm_crtc
*crtc
)
4257 dm_set_vblank(crtc
, false);
4260 /* Implemented only the options currently availible for the driver */
4261 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
4262 .reset
= dm_crtc_reset_state
,
4263 .destroy
= amdgpu_dm_crtc_destroy
,
4264 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
4265 .set_config
= drm_atomic_helper_set_config
,
4266 .page_flip
= drm_atomic_helper_page_flip
,
4267 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
4268 .atomic_destroy_state
= dm_crtc_destroy_state
,
4269 .set_crc_source
= amdgpu_dm_crtc_set_crc_source
,
4270 .verify_crc_source
= amdgpu_dm_crtc_verify_crc_source
,
4271 .get_crc_sources
= amdgpu_dm_crtc_get_crc_sources
,
4272 .enable_vblank
= dm_enable_vblank
,
4273 .disable_vblank
= dm_disable_vblank
,
4276 static enum drm_connector_status
4277 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
4280 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4284 * 1. This interface is NOT called in context of HPD irq.
4285 * 2. This interface *is called* in context of user-mode ioctl. Which
4286 * makes it a bad place for *any* MST-related activity.
4289 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
&&
4290 !aconnector
->fake_enable
)
4291 connected
= (aconnector
->dc_sink
!= NULL
);
4293 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
4295 return (connected
? connector_status_connected
:
4296 connector_status_disconnected
);
4299 int amdgpu_dm_connector_atomic_set_property(struct drm_connector
*connector
,
4300 struct drm_connector_state
*connector_state
,
4301 struct drm_property
*property
,
4304 struct drm_device
*dev
= connector
->dev
;
4305 struct amdgpu_device
*adev
= dev
->dev_private
;
4306 struct dm_connector_state
*dm_old_state
=
4307 to_dm_connector_state(connector
->state
);
4308 struct dm_connector_state
*dm_new_state
=
4309 to_dm_connector_state(connector_state
);
4313 if (property
== dev
->mode_config
.scaling_mode_property
) {
4314 enum amdgpu_rmx_type rmx_type
;
4317 case DRM_MODE_SCALE_CENTER
:
4318 rmx_type
= RMX_CENTER
;
4320 case DRM_MODE_SCALE_ASPECT
:
4321 rmx_type
= RMX_ASPECT
;
4323 case DRM_MODE_SCALE_FULLSCREEN
:
4324 rmx_type
= RMX_FULL
;
4326 case DRM_MODE_SCALE_NONE
:
4332 if (dm_old_state
->scaling
== rmx_type
)
4335 dm_new_state
->scaling
= rmx_type
;
4337 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
4338 dm_new_state
->underscan_hborder
= val
;
4340 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
4341 dm_new_state
->underscan_vborder
= val
;
4343 } else if (property
== adev
->mode_info
.underscan_property
) {
4344 dm_new_state
->underscan_enable
= val
;
4346 } else if (property
== adev
->mode_info
.abm_level_property
) {
4347 dm_new_state
->abm_level
= val
;
4354 int amdgpu_dm_connector_atomic_get_property(struct drm_connector
*connector
,
4355 const struct drm_connector_state
*state
,
4356 struct drm_property
*property
,
4359 struct drm_device
*dev
= connector
->dev
;
4360 struct amdgpu_device
*adev
= dev
->dev_private
;
4361 struct dm_connector_state
*dm_state
=
4362 to_dm_connector_state(state
);
4365 if (property
== dev
->mode_config
.scaling_mode_property
) {
4366 switch (dm_state
->scaling
) {
4368 *val
= DRM_MODE_SCALE_CENTER
;
4371 *val
= DRM_MODE_SCALE_ASPECT
;
4374 *val
= DRM_MODE_SCALE_FULLSCREEN
;
4378 *val
= DRM_MODE_SCALE_NONE
;
4382 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
4383 *val
= dm_state
->underscan_hborder
;
4385 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
4386 *val
= dm_state
->underscan_vborder
;
4388 } else if (property
== adev
->mode_info
.underscan_property
) {
4389 *val
= dm_state
->underscan_enable
;
4391 } else if (property
== adev
->mode_info
.abm_level_property
) {
4392 *val
= dm_state
->abm_level
;
4399 static void amdgpu_dm_connector_unregister(struct drm_connector
*connector
)
4401 struct amdgpu_dm_connector
*amdgpu_dm_connector
= to_amdgpu_dm_connector(connector
);
4403 drm_dp_aux_unregister(&amdgpu_dm_connector
->dm_dp_aux
.aux
);
4406 static void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
4408 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4409 const struct dc_link
*link
= aconnector
->dc_link
;
4410 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
4411 struct amdgpu_display_manager
*dm
= &adev
->dm
;
4413 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4414 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4416 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
4417 link
->type
!= dc_connection_none
&&
4418 dm
->backlight_dev
) {
4419 backlight_device_unregister(dm
->backlight_dev
);
4420 dm
->backlight_dev
= NULL
;
4424 if (aconnector
->dc_em_sink
)
4425 dc_sink_release(aconnector
->dc_em_sink
);
4426 aconnector
->dc_em_sink
= NULL
;
4427 if (aconnector
->dc_sink
)
4428 dc_sink_release(aconnector
->dc_sink
);
4429 aconnector
->dc_sink
= NULL
;
4431 drm_dp_cec_unregister_connector(&aconnector
->dm_dp_aux
.aux
);
4432 drm_connector_unregister(connector
);
4433 drm_connector_cleanup(connector
);
4434 if (aconnector
->i2c
) {
4435 i2c_del_adapter(&aconnector
->i2c
->base
);
4436 kfree(aconnector
->i2c
);
4442 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
4444 struct dm_connector_state
*state
=
4445 to_dm_connector_state(connector
->state
);
4447 if (connector
->state
)
4448 __drm_atomic_helper_connector_destroy_state(connector
->state
);
4452 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
4455 state
->scaling
= RMX_OFF
;
4456 state
->underscan_enable
= false;
4457 state
->underscan_hborder
= 0;
4458 state
->underscan_vborder
= 0;
4459 state
->base
.max_requested_bpc
= 8;
4460 state
->vcpi_slots
= 0;
4462 if (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)
4463 state
->abm_level
= amdgpu_dm_abm_level
;
4465 __drm_atomic_helper_connector_reset(connector
, &state
->base
);
4469 struct drm_connector_state
*
4470 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector
*connector
)
4472 struct dm_connector_state
*state
=
4473 to_dm_connector_state(connector
->state
);
4475 struct dm_connector_state
*new_state
=
4476 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
4481 __drm_atomic_helper_connector_duplicate_state(connector
, &new_state
->base
);
4483 new_state
->freesync_capable
= state
->freesync_capable
;
4484 new_state
->abm_level
= state
->abm_level
;
4485 new_state
->scaling
= state
->scaling
;
4486 new_state
->underscan_enable
= state
->underscan_enable
;
4487 new_state
->underscan_hborder
= state
->underscan_hborder
;
4488 new_state
->underscan_vborder
= state
->underscan_vborder
;
4489 new_state
->vcpi_slots
= state
->vcpi_slots
;
4490 new_state
->pbn
= state
->pbn
;
4491 return &new_state
->base
;
4494 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
4495 .reset
= amdgpu_dm_connector_funcs_reset
,
4496 .detect
= amdgpu_dm_connector_detect
,
4497 .fill_modes
= drm_helper_probe_single_connector_modes
,
4498 .destroy
= amdgpu_dm_connector_destroy
,
4499 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
4500 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
4501 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
4502 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
,
4503 .early_unregister
= amdgpu_dm_connector_unregister
/* DRM helper .get_modes hook: delegate to the connector implementation. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
4511 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
4513 struct dc_sink_init_data init_params
= {
4514 .link
= aconnector
->dc_link
,
4515 .sink_signal
= SIGNAL_TYPE_VIRTUAL
4519 if (!aconnector
->base
.edid_blob_ptr
) {
4520 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4521 aconnector
->base
.name
);
4523 aconnector
->base
.force
= DRM_FORCE_OFF
;
4524 aconnector
->base
.override_edid
= false;
4528 edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
4530 aconnector
->edid
= edid
;
4532 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
4533 aconnector
->dc_link
,
4535 (edid
->extensions
+ 1) * EDID_LENGTH
,
4538 if (aconnector
->base
.force
== DRM_FORCE_ON
) {
4539 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
4540 aconnector
->dc_link
->local_sink
:
4541 aconnector
->dc_em_sink
;
4542 dc_sink_retain(aconnector
->dc_sink
);
4546 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
4548 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
4551 * In case of headless boot with force on for DP managed connector
4552 * Those settings have to be != 0 to get initial modeset
4554 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
4555 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
4556 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
4560 aconnector
->base
.override_edid
= true;
4561 create_eml_sink(aconnector
);
4564 enum drm_mode_status
amdgpu_dm_connector_mode_valid(struct drm_connector
*connector
,
4565 struct drm_display_mode
*mode
)
4567 int result
= MODE_ERROR
;
4568 struct dc_sink
*dc_sink
;
4569 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
4570 /* TODO: Unhardcode stream count */
4571 struct dc_stream_state
*stream
;
4572 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4573 enum dc_status dc_result
= DC_OK
;
4575 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
4576 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
4580 * Only run this the first time mode_valid is called to initilialize
4583 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
4584 !aconnector
->dc_em_sink
)
4585 handle_edid_mgmt(aconnector
);
4587 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
4589 if (dc_sink
== NULL
) {
4590 DRM_ERROR("dc_sink is NULL!\n");
4594 stream
= create_stream_for_sink(aconnector
, mode
, NULL
, NULL
);
4595 if (stream
== NULL
) {
4596 DRM_ERROR("Failed to create stream for sink!\n");
4600 dc_result
= dc_validate_stream(adev
->dm
.dc
, stream
);
4602 if (dc_result
== DC_OK
)
4605 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4611 dc_stream_release(stream
);
4614 /* TODO: error handling*/
4618 static int fill_hdr_info_packet(const struct drm_connector_state
*state
,
4619 struct dc_info_packet
*out
)
4621 struct hdmi_drm_infoframe frame
;
4622 unsigned char buf
[30]; /* 26 + 4 */
4626 memset(out
, 0, sizeof(*out
));
4628 if (!state
->hdr_output_metadata
)
4631 ret
= drm_hdmi_infoframe_set_hdr_metadata(&frame
, state
);
4635 len
= hdmi_drm_infoframe_pack_only(&frame
, buf
, sizeof(buf
));
4639 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4643 /* Prepare the infopacket for DC. */
4644 switch (state
->connector
->connector_type
) {
4645 case DRM_MODE_CONNECTOR_HDMIA
:
4646 out
->hb0
= 0x87; /* type */
4647 out
->hb1
= 0x01; /* version */
4648 out
->hb2
= 0x1A; /* length */
4649 out
->sb
[0] = buf
[3]; /* checksum */
4653 case DRM_MODE_CONNECTOR_DisplayPort
:
4654 case DRM_MODE_CONNECTOR_eDP
:
4655 out
->hb0
= 0x00; /* sdp id, zero */
4656 out
->hb1
= 0x87; /* type */
4657 out
->hb2
= 0x1D; /* payload len - 1 */
4658 out
->hb3
= (0x13 << 2); /* sdp version */
4659 out
->sb
[0] = 0x01; /* version */
4660 out
->sb
[1] = 0x1A; /* length */
4668 memcpy(&out
->sb
[i
], &buf
[4], 26);
4671 print_hex_dump(KERN_DEBUG
, "HDR SB:", DUMP_PREFIX_NONE
, 16, 1, out
->sb
,
4672 sizeof(out
->sb
), false);
4678 is_hdr_metadata_different(const struct drm_connector_state
*old_state
,
4679 const struct drm_connector_state
*new_state
)
4681 struct drm_property_blob
*old_blob
= old_state
->hdr_output_metadata
;
4682 struct drm_property_blob
*new_blob
= new_state
->hdr_output_metadata
;
4684 if (old_blob
!= new_blob
) {
4685 if (old_blob
&& new_blob
&&
4686 old_blob
->length
== new_blob
->length
)
4687 return memcmp(old_blob
->data
, new_blob
->data
,
4697 amdgpu_dm_connector_atomic_check(struct drm_connector
*conn
,
4698 struct drm_atomic_state
*state
)
4700 struct drm_connector_state
*new_con_state
=
4701 drm_atomic_get_new_connector_state(state
, conn
);
4702 struct drm_connector_state
*old_con_state
=
4703 drm_atomic_get_old_connector_state(state
, conn
);
4704 struct drm_crtc
*crtc
= new_con_state
->crtc
;
4705 struct drm_crtc_state
*new_crtc_state
;
4711 if (is_hdr_metadata_different(old_con_state
, new_con_state
)) {
4712 struct dc_info_packet hdr_infopacket
;
4714 ret
= fill_hdr_info_packet(new_con_state
, &hdr_infopacket
);
4718 new_crtc_state
= drm_atomic_get_crtc_state(state
, crtc
);
4719 if (IS_ERR(new_crtc_state
))
4720 return PTR_ERR(new_crtc_state
);
4723 * DC considers the stream backends changed if the
4724 * static metadata changes. Forcing the modeset also
4725 * gives a simple way for userspace to switch from
4726 * 8bpc to 10bpc when setting the metadata to enter
4729 * Changing the static metadata after it's been
4730 * set is permissible, however. So only force a
4731 * modeset if we're entering or exiting HDR.
4733 new_crtc_state
->mode_changed
=
4734 !old_con_state
->hdr_output_metadata
||
4735 !new_con_state
->hdr_output_metadata
;
4741 static const struct drm_connector_helper_funcs
4742 amdgpu_dm_connector_helper_funcs
= {
4744 * If hotplugging a second bigger display in FB Con mode, bigger resolution
4745 * modes will be filtered by drm_mode_validate_size(), and those modes
4746 * are missing after user start lightdm. So we need to renew modes list.
4747 * in get_modes call back, not just return the modes count
4749 .get_modes
= get_modes
,
4750 .mode_valid
= amdgpu_dm_connector_mode_valid
,
4751 .atomic_check
= amdgpu_dm_connector_atomic_check
,
4754 static void dm_crtc_helper_disable(struct drm_crtc
*crtc
)
4758 static bool does_crtc_have_active_cursor(struct drm_crtc_state
*new_crtc_state
)
4760 struct drm_device
*dev
= new_crtc_state
->crtc
->dev
;
4761 struct drm_plane
*plane
;
4763 drm_for_each_plane_mask(plane
, dev
, new_crtc_state
->plane_mask
) {
4764 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
4771 static int count_crtc_active_planes(struct drm_crtc_state
*new_crtc_state
)
4773 struct drm_atomic_state
*state
= new_crtc_state
->state
;
4774 struct drm_plane
*plane
;
4777 drm_for_each_plane_mask(plane
, state
->dev
, new_crtc_state
->plane_mask
) {
4778 struct drm_plane_state
*new_plane_state
;
4780 /* Cursor planes are "fake". */
4781 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
4784 new_plane_state
= drm_atomic_get_new_plane_state(state
, plane
);
4786 if (!new_plane_state
) {
4788 * The plane is enable on the CRTC and hasn't changed
4789 * state. This means that it previously passed
4790 * validation and is therefore enabled.
4796 /* We need a framebuffer to be considered enabled. */
4797 num_active
+= (new_plane_state
->fb
!= NULL
);
4804 * Sets whether interrupts should be enabled on a specific CRTC.
4805 * We require that the stream be enabled and that there exist active
4806 * DC planes on the stream.
4809 dm_update_crtc_interrupt_state(struct drm_crtc
*crtc
,
4810 struct drm_crtc_state
*new_crtc_state
)
4812 struct dm_crtc_state
*dm_new_crtc_state
=
4813 to_dm_crtc_state(new_crtc_state
);
4815 dm_new_crtc_state
->active_planes
= 0;
4816 dm_new_crtc_state
->interrupts_enabled
= false;
4818 if (!dm_new_crtc_state
->stream
)
4821 dm_new_crtc_state
->active_planes
=
4822 count_crtc_active_planes(new_crtc_state
);
4824 dm_new_crtc_state
->interrupts_enabled
=
4825 dm_new_crtc_state
->active_planes
> 0;
4828 static int dm_crtc_helper_atomic_check(struct drm_crtc
*crtc
,
4829 struct drm_crtc_state
*state
)
4831 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
4832 struct dc
*dc
= adev
->dm
.dc
;
4833 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
4837 * Update interrupt state for the CRTC. This needs to happen whenever
4838 * the CRTC has changed or whenever any of its planes have changed.
4839 * Atomic check satisfies both of these requirements since the CRTC
4840 * is added to the state by DRM during drm_atomic_helper_check_planes.
4842 dm_update_crtc_interrupt_state(crtc
, state
);
4844 if (unlikely(!dm_crtc_state
->stream
&&
4845 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
4850 /* In some use cases, like reset, no stream is attached */
4851 if (!dm_crtc_state
->stream
)
4855 * We want at least one hardware plane enabled to use
4856 * the stream with a cursor enabled.
4858 if (state
->enable
&& state
->active
&&
4859 does_crtc_have_active_cursor(state
) &&
4860 dm_crtc_state
->active_planes
== 0)
4863 if (dc_validate_stream(dc
, dm_crtc_state
->stream
) == DC_OK
)
4869 static bool dm_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
4870 const struct drm_display_mode
*mode
,
4871 struct drm_display_mode
*adjusted_mode
)
4876 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
4877 .disable
= dm_crtc_helper_disable
,
4878 .atomic_check
= dm_crtc_helper_atomic_check
,
4879 .mode_fixup
= dm_crtc_helper_mode_fixup
4882 static void dm_encoder_helper_disable(struct drm_encoder
*encoder
)
4887 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth
)
4889 switch (display_color_depth
) {
4890 case COLOR_DEPTH_666
:
4892 case COLOR_DEPTH_888
:
4894 case COLOR_DEPTH_101010
:
4896 case COLOR_DEPTH_121212
:
4898 case COLOR_DEPTH_141414
:
4900 case COLOR_DEPTH_161616
:
4908 static int dm_encoder_helper_atomic_check(struct drm_encoder
*encoder
,
4909 struct drm_crtc_state
*crtc_state
,
4910 struct drm_connector_state
*conn_state
)
4912 struct drm_atomic_state
*state
= crtc_state
->state
;
4913 struct drm_connector
*connector
= conn_state
->connector
;
4914 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4915 struct dm_connector_state
*dm_new_connector_state
= to_dm_connector_state(conn_state
);
4916 const struct drm_display_mode
*adjusted_mode
= &crtc_state
->adjusted_mode
;
4917 struct drm_dp_mst_topology_mgr
*mst_mgr
;
4918 struct drm_dp_mst_port
*mst_port
;
4919 enum dc_color_depth color_depth
;
4921 bool is_y420
= false;
4923 if (!aconnector
->port
|| !aconnector
->dc_sink
)
4926 mst_port
= aconnector
->port
;
4927 mst_mgr
= &aconnector
->mst_port
->mst_mgr
;
4929 if (!crtc_state
->connectors_changed
&& !crtc_state
->mode_changed
)
4932 if (!state
->duplicated
) {
4933 is_y420
= drm_mode_is_420_also(&connector
->display_info
, adjusted_mode
) &&
4934 aconnector
->force_yuv420_output
;
4935 color_depth
= convert_color_depth_from_display_info(connector
, conn_state
,
4937 bpp
= convert_dc_color_depth_into_bpc(color_depth
) * 3;
4938 clock
= adjusted_mode
->clock
;
4939 dm_new_connector_state
->pbn
= drm_dp_calc_pbn_mode(clock
, bpp
, false);
4941 dm_new_connector_state
->vcpi_slots
= drm_dp_atomic_find_vcpi_slots(state
,
4944 dm_new_connector_state
->pbn
,
4946 if (dm_new_connector_state
->vcpi_slots
< 0) {
4947 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state
->vcpi_slots
);
4948 return dm_new_connector_state
->vcpi_slots
;
4953 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
4954 .disable
= dm_encoder_helper_disable
,
4955 .atomic_check
= dm_encoder_helper_atomic_check
4958 #if defined(CONFIG_DRM_AMD_DC_DCN)
4959 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state
*state
,
4960 struct dc_state
*dc_state
)
4962 struct dc_stream_state
*stream
= NULL
;
4963 struct drm_connector
*connector
;
4964 struct drm_connector_state
*new_con_state
, *old_con_state
;
4965 struct amdgpu_dm_connector
*aconnector
;
4966 struct dm_connector_state
*dm_conn_state
;
4967 int i
, j
, clock
, bpp
;
4968 int vcpi
, pbn_div
, pbn
= 0;
4970 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
4972 aconnector
= to_amdgpu_dm_connector(connector
);
4974 if (!aconnector
->port
)
4977 if (!new_con_state
|| !new_con_state
->crtc
)
4980 dm_conn_state
= to_dm_connector_state(new_con_state
);
4982 for (j
= 0; j
< dc_state
->stream_count
; j
++) {
4983 stream
= dc_state
->streams
[j
];
4987 if ((struct amdgpu_dm_connector
*)stream
->dm_stream_context
== aconnector
)
4996 if (stream
->timing
.flags
.DSC
!= 1) {
4997 drm_dp_mst_atomic_enable_dsc(state
,
5005 pbn_div
= dm_mst_get_pbn_divider(stream
->link
);
5006 bpp
= stream
->timing
.dsc_cfg
.bits_per_pixel
;
5007 clock
= stream
->timing
.pix_clk_100hz
/ 10;
5008 pbn
= drm_dp_calc_pbn_mode(clock
, bpp
, true);
5009 vcpi
= drm_dp_mst_atomic_enable_dsc(state
,
5016 dm_conn_state
->pbn
= pbn
;
5017 dm_conn_state
->vcpi_slots
= vcpi
;
5023 static void dm_drm_plane_reset(struct drm_plane
*plane
)
5025 struct dm_plane_state
*amdgpu_state
= NULL
;
5028 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
5030 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
5031 WARN_ON(amdgpu_state
== NULL
);
5034 __drm_atomic_helper_plane_reset(plane
, &amdgpu_state
->base
);
5037 static struct drm_plane_state
*
5038 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
5040 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
5042 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
5043 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
5044 if (!dm_plane_state
)
5047 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
5049 if (old_dm_plane_state
->dc_state
) {
5050 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
5051 dc_plane_state_retain(dm_plane_state
->dc_state
);
5054 return &dm_plane_state
->base
;
5057 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
5058 struct drm_plane_state
*state
)
5060 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
5062 if (dm_plane_state
->dc_state
)
5063 dc_plane_state_release(dm_plane_state
->dc_state
);
5065 drm_atomic_helper_plane_destroy_state(plane
, state
);
5068 static const struct drm_plane_funcs dm_plane_funcs
= {
5069 .update_plane
= drm_atomic_helper_update_plane
,
5070 .disable_plane
= drm_atomic_helper_disable_plane
,
5071 .destroy
= drm_primary_helper_destroy
,
5072 .reset
= dm_drm_plane_reset
,
5073 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
5074 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
5077 static int dm_plane_helper_prepare_fb(struct drm_plane
*plane
,
5078 struct drm_plane_state
*new_state
)
5080 struct amdgpu_framebuffer
*afb
;
5081 struct drm_gem_object
*obj
;
5082 struct amdgpu_device
*adev
;
5083 struct amdgpu_bo
*rbo
;
5084 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
5085 struct list_head list
;
5086 struct ttm_validate_buffer tv
;
5087 struct ww_acquire_ctx ticket
;
5088 uint64_t tiling_flags
;
5092 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
5093 dm_plane_state_new
= to_dm_plane_state(new_state
);
5095 if (!new_state
->fb
) {
5096 DRM_DEBUG_DRIVER("No FB bound\n");
5100 afb
= to_amdgpu_framebuffer(new_state
->fb
);
5101 obj
= new_state
->fb
->obj
[0];
5102 rbo
= gem_to_amdgpu_bo(obj
);
5103 adev
= amdgpu_ttm_adev(rbo
->tbo
.bdev
);
5104 INIT_LIST_HEAD(&list
);
5108 list_add(&tv
.head
, &list
);
5110 r
= ttm_eu_reserve_buffers(&ticket
, &list
, false, NULL
);
5112 dev_err(adev
->dev
, "fail to reserve bo (%d)\n", r
);
5116 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
5117 domain
= amdgpu_display_supported_domains(adev
, rbo
->flags
);
5119 domain
= AMDGPU_GEM_DOMAIN_VRAM
;
5121 r
= amdgpu_bo_pin(rbo
, domain
);
5122 if (unlikely(r
!= 0)) {
5123 if (r
!= -ERESTARTSYS
)
5124 DRM_ERROR("Failed to pin framebuffer with error %d\n", r
);
5125 ttm_eu_backoff_reservation(&ticket
, &list
);
5129 r
= amdgpu_ttm_alloc_gart(&rbo
->tbo
);
5130 if (unlikely(r
!= 0)) {
5131 amdgpu_bo_unpin(rbo
);
5132 ttm_eu_backoff_reservation(&ticket
, &list
);
5133 DRM_ERROR("%p bind failed\n", rbo
);
5137 amdgpu_bo_get_tiling_flags(rbo
, &tiling_flags
);
5139 ttm_eu_backoff_reservation(&ticket
, &list
);
5141 afb
->address
= amdgpu_bo_gpu_offset(rbo
);
5145 if (dm_plane_state_new
->dc_state
&&
5146 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
5147 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
5149 fill_plane_buffer_attributes(
5150 adev
, afb
, plane_state
->format
, plane_state
->rotation
,
5151 tiling_flags
, &plane_state
->tiling_info
,
5152 &plane_state
->plane_size
, &plane_state
->dcc
,
5153 &plane_state
->address
);
5159 static void dm_plane_helper_cleanup_fb(struct drm_plane
*plane
,
5160 struct drm_plane_state
*old_state
)
5162 struct amdgpu_bo
*rbo
;
5168 rbo
= gem_to_amdgpu_bo(old_state
->fb
->obj
[0]);
5169 r
= amdgpu_bo_reserve(rbo
, false);
5171 DRM_ERROR("failed to reserve rbo before unpin\n");
5175 amdgpu_bo_unpin(rbo
);
5176 amdgpu_bo_unreserve(rbo
);
5177 amdgpu_bo_unref(&rbo
);
5180 static int dm_plane_atomic_check(struct drm_plane
*plane
,
5181 struct drm_plane_state
*state
)
5183 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
5184 struct dc
*dc
= adev
->dm
.dc
;
5185 struct dm_plane_state
*dm_plane_state
;
5186 struct dc_scaling_info scaling_info
;
5189 dm_plane_state
= to_dm_plane_state(state
);
5191 if (!dm_plane_state
->dc_state
)
5194 ret
= fill_dc_scaling_info(state
, &scaling_info
);
5198 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
) == DC_OK
)
5204 static int dm_plane_atomic_async_check(struct drm_plane
*plane
,
5205 struct drm_plane_state
*new_plane_state
)
5207 /* Only support async updates on cursor planes. */
5208 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
5214 static void dm_plane_atomic_async_update(struct drm_plane
*plane
,
5215 struct drm_plane_state
*new_state
)
5217 struct drm_plane_state
*old_state
=
5218 drm_atomic_get_old_plane_state(new_state
->state
, plane
);
5220 swap(plane
->state
->fb
, new_state
->fb
);
5222 plane
->state
->src_x
= new_state
->src_x
;
5223 plane
->state
->src_y
= new_state
->src_y
;
5224 plane
->state
->src_w
= new_state
->src_w
;
5225 plane
->state
->src_h
= new_state
->src_h
;
5226 plane
->state
->crtc_x
= new_state
->crtc_x
;
5227 plane
->state
->crtc_y
= new_state
->crtc_y
;
5228 plane
->state
->crtc_w
= new_state
->crtc_w
;
5229 plane
->state
->crtc_h
= new_state
->crtc_h
;
5231 handle_cursor_update(plane
, old_state
);
5234 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
5235 .prepare_fb
= dm_plane_helper_prepare_fb
,
5236 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
5237 .atomic_check
= dm_plane_atomic_check
,
5238 .atomic_async_check
= dm_plane_atomic_async_check
,
5239 .atomic_async_update
= dm_plane_atomic_async_update
5243 * TODO: these are currently initialized to rgb formats only.
5244 * For future use cases we should either initialize them dynamically based on
5245 * plane capabilities, or initialize this array to all formats, so internal drm
5246 * check will succeed, and let DC implement proper check
5248 static const uint32_t rgb_formats
[] = {
5249 DRM_FORMAT_XRGB8888
,
5250 DRM_FORMAT_ARGB8888
,
5251 DRM_FORMAT_RGBA8888
,
5252 DRM_FORMAT_XRGB2101010
,
5253 DRM_FORMAT_XBGR2101010
,
5254 DRM_FORMAT_ARGB2101010
,
5255 DRM_FORMAT_ABGR2101010
,
5256 DRM_FORMAT_XBGR8888
,
5257 DRM_FORMAT_ABGR8888
,
5261 static const uint32_t overlay_formats
[] = {
5262 DRM_FORMAT_XRGB8888
,
5263 DRM_FORMAT_ARGB8888
,
5264 DRM_FORMAT_RGBA8888
,
5265 DRM_FORMAT_XBGR8888
,
5266 DRM_FORMAT_ABGR8888
,
5270 static const u32 cursor_formats
[] = {
5274 static int get_plane_formats(const struct drm_plane
*plane
,
5275 const struct dc_plane_cap
*plane_cap
,
5276 uint32_t *formats
, int max_formats
)
5278 int i
, num_formats
= 0;
5281 * TODO: Query support for each group of formats directly from
5282 * DC plane caps. This will require adding more formats to the
5286 switch (plane
->type
) {
5287 case DRM_PLANE_TYPE_PRIMARY
:
5288 for (i
= 0; i
< ARRAY_SIZE(rgb_formats
); ++i
) {
5289 if (num_formats
>= max_formats
)
5292 formats
[num_formats
++] = rgb_formats
[i
];
5295 if (plane_cap
&& plane_cap
->pixel_format_support
.nv12
)
5296 formats
[num_formats
++] = DRM_FORMAT_NV12
;
5299 case DRM_PLANE_TYPE_OVERLAY
:
5300 for (i
= 0; i
< ARRAY_SIZE(overlay_formats
); ++i
) {
5301 if (num_formats
>= max_formats
)
5304 formats
[num_formats
++] = overlay_formats
[i
];
5308 case DRM_PLANE_TYPE_CURSOR
:
5309 for (i
= 0; i
< ARRAY_SIZE(cursor_formats
); ++i
) {
5310 if (num_formats
>= max_formats
)
5313 formats
[num_formats
++] = cursor_formats
[i
];
5321 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
5322 struct drm_plane
*plane
,
5323 unsigned long possible_crtcs
,
5324 const struct dc_plane_cap
*plane_cap
)
5326 uint32_t formats
[32];
5330 num_formats
= get_plane_formats(plane
, plane_cap
, formats
,
5331 ARRAY_SIZE(formats
));
5333 res
= drm_universal_plane_init(dm
->adev
->ddev
, plane
, possible_crtcs
,
5334 &dm_plane_funcs
, formats
, num_formats
,
5335 NULL
, plane
->type
, NULL
);
5339 if (plane
->type
== DRM_PLANE_TYPE_OVERLAY
&&
5340 plane_cap
&& plane_cap
->per_pixel_alpha
) {
5341 unsigned int blend_caps
= BIT(DRM_MODE_BLEND_PIXEL_NONE
) |
5342 BIT(DRM_MODE_BLEND_PREMULTI
);
5344 drm_plane_create_alpha_property(plane
);
5345 drm_plane_create_blend_mode_property(plane
, blend_caps
);
5348 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
&&
5349 plane_cap
&& plane_cap
->pixel_format_support
.nv12
) {
5350 /* This only affects YUV formats. */
5351 drm_plane_create_color_properties(
5353 BIT(DRM_COLOR_YCBCR_BT601
) |
5354 BIT(DRM_COLOR_YCBCR_BT709
),
5355 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE
) |
5356 BIT(DRM_COLOR_YCBCR_FULL_RANGE
),
5357 DRM_COLOR_YCBCR_BT709
, DRM_COLOR_YCBCR_LIMITED_RANGE
);
5360 drm_plane_helper_add(plane
, &dm_plane_helper_funcs
);
5362 /* Create (reset) the plane state */
5363 if (plane
->funcs
->reset
)
5364 plane
->funcs
->reset(plane
);
5369 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
5370 struct drm_plane
*plane
,
5371 uint32_t crtc_index
)
5373 struct amdgpu_crtc
*acrtc
= NULL
;
5374 struct drm_plane
*cursor_plane
;
5378 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
5382 cursor_plane
->type
= DRM_PLANE_TYPE_CURSOR
;
5383 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0, NULL
);
5385 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
5389 res
= drm_crtc_init_with_planes(
5394 &amdgpu_dm_crtc_funcs
, NULL
);
5399 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
5401 /* Create (reset) the plane state */
5402 if (acrtc
->base
.funcs
->reset
)
5403 acrtc
->base
.funcs
->reset(&acrtc
->base
);
5405 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
5406 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
5408 acrtc
->crtc_id
= crtc_index
;
5409 acrtc
->base
.enabled
= false;
5410 acrtc
->otg_inst
= -1;
5412 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
5413 drm_crtc_enable_color_mgmt(&acrtc
->base
, MAX_COLOR_LUT_ENTRIES
,
5414 true, MAX_COLOR_LUT_ENTRIES
);
5415 drm_mode_crtc_set_gamma_size(&acrtc
->base
, MAX_COLOR_LEGACY_LUT_ENTRIES
);
5421 kfree(cursor_plane
);
5426 static int to_drm_connector_type(enum signal_type st
)
5429 case SIGNAL_TYPE_HDMI_TYPE_A
:
5430 return DRM_MODE_CONNECTOR_HDMIA
;
5431 case SIGNAL_TYPE_EDP
:
5432 return DRM_MODE_CONNECTOR_eDP
;
5433 case SIGNAL_TYPE_LVDS
:
5434 return DRM_MODE_CONNECTOR_LVDS
;
5435 case SIGNAL_TYPE_RGB
:
5436 return DRM_MODE_CONNECTOR_VGA
;
5437 case SIGNAL_TYPE_DISPLAY_PORT
:
5438 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
5439 return DRM_MODE_CONNECTOR_DisplayPort
;
5440 case SIGNAL_TYPE_DVI_DUAL_LINK
:
5441 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
5442 return DRM_MODE_CONNECTOR_DVID
;
5443 case SIGNAL_TYPE_VIRTUAL
:
5444 return DRM_MODE_CONNECTOR_VIRTUAL
;
5447 return DRM_MODE_CONNECTOR_Unknown
;
5451 static struct drm_encoder
*amdgpu_dm_connector_to_encoder(struct drm_connector
*connector
)
5453 struct drm_encoder
*encoder
;
5455 /* There is only one encoder per connector */
5456 drm_connector_for_each_possible_encoder(connector
, encoder
)
5462 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
5464 struct drm_encoder
*encoder
;
5465 struct amdgpu_encoder
*amdgpu_encoder
;
5467 encoder
= amdgpu_dm_connector_to_encoder(connector
);
5469 if (encoder
== NULL
)
5472 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
5474 amdgpu_encoder
->native_mode
.clock
= 0;
5476 if (!list_empty(&connector
->probed_modes
)) {
5477 struct drm_display_mode
*preferred_mode
= NULL
;
5479 list_for_each_entry(preferred_mode
,
5480 &connector
->probed_modes
,
5482 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
5483 amdgpu_encoder
->native_mode
= *preferred_mode
;
5491 static struct drm_display_mode
*
5492 amdgpu_dm_create_common_mode(struct drm_encoder
*encoder
,
5494 int hdisplay
, int vdisplay
)
5496 struct drm_device
*dev
= encoder
->dev
;
5497 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
5498 struct drm_display_mode
*mode
= NULL
;
5499 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
5501 mode
= drm_mode_duplicate(dev
, native_mode
);
5506 mode
->hdisplay
= hdisplay
;
5507 mode
->vdisplay
= vdisplay
;
5508 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
5509 strscpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
5515 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
5516 struct drm_connector
*connector
)
5518 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
5519 struct drm_display_mode
*mode
= NULL
;
5520 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
5521 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5522 to_amdgpu_dm_connector(connector
);
5526 char name
[DRM_DISPLAY_MODE_LEN
];
5529 } common_modes
[] = {
5530 { "640x480", 640, 480},
5531 { "800x600", 800, 600},
5532 { "1024x768", 1024, 768},
5533 { "1280x720", 1280, 720},
5534 { "1280x800", 1280, 800},
5535 {"1280x1024", 1280, 1024},
5536 { "1440x900", 1440, 900},
5537 {"1680x1050", 1680, 1050},
5538 {"1600x1200", 1600, 1200},
5539 {"1920x1080", 1920, 1080},
5540 {"1920x1200", 1920, 1200}
5543 n
= ARRAY_SIZE(common_modes
);
5545 for (i
= 0; i
< n
; i
++) {
5546 struct drm_display_mode
*curmode
= NULL
;
5547 bool mode_existed
= false;
5549 if (common_modes
[i
].w
> native_mode
->hdisplay
||
5550 common_modes
[i
].h
> native_mode
->vdisplay
||
5551 (common_modes
[i
].w
== native_mode
->hdisplay
&&
5552 common_modes
[i
].h
== native_mode
->vdisplay
))
5555 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
5556 if (common_modes
[i
].w
== curmode
->hdisplay
&&
5557 common_modes
[i
].h
== curmode
->vdisplay
) {
5558 mode_existed
= true;
5566 mode
= amdgpu_dm_create_common_mode(encoder
,
5567 common_modes
[i
].name
, common_modes
[i
].w
,
5569 drm_mode_probed_add(connector
, mode
);
5570 amdgpu_dm_connector
->num_modes
++;
5574 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector
*connector
,
5577 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5578 to_amdgpu_dm_connector(connector
);
5581 /* empty probed_modes */
5582 INIT_LIST_HEAD(&connector
->probed_modes
);
5583 amdgpu_dm_connector
->num_modes
=
5584 drm_add_edid_modes(connector
, edid
);
5586 /* sorting the probed modes before calling function
5587 * amdgpu_dm_get_native_mode() since EDID can have
5588 * more than one preferred mode. The modes that are
5589 * later in the probed mode list could be of higher
5590 * and preferred resolution. For example, 3840x2160
5591 * resolution in base EDID preferred timing and 4096x2160
5592 * preferred resolution in DID extension block later.
5594 drm_mode_sort(&connector
->probed_modes
);
5595 amdgpu_dm_get_native_mode(connector
);
5597 amdgpu_dm_connector
->num_modes
= 0;
5601 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
5603 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5604 to_amdgpu_dm_connector(connector
);
5605 struct drm_encoder
*encoder
;
5606 struct edid
*edid
= amdgpu_dm_connector
->edid
;
5608 encoder
= amdgpu_dm_connector_to_encoder(connector
);
5610 if (!edid
|| !drm_edid_is_valid(edid
)) {
5611 amdgpu_dm_connector
->num_modes
=
5612 drm_add_modes_noedid(connector
, 640, 480);
5614 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
5615 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
5617 amdgpu_dm_fbc_init(connector
);
5619 return amdgpu_dm_connector
->num_modes
;
5622 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager
*dm
,
5623 struct amdgpu_dm_connector
*aconnector
,
5625 struct dc_link
*link
,
5628 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
5631 * Some of the properties below require access to state, like bpc.
5632 * Allocate some default initial connector state with our reset helper.
5634 if (aconnector
->base
.funcs
->reset
)
5635 aconnector
->base
.funcs
->reset(&aconnector
->base
);
5637 aconnector
->connector_id
= link_index
;
5638 aconnector
->dc_link
= link
;
5639 aconnector
->base
.interlace_allowed
= false;
5640 aconnector
->base
.doublescan_allowed
= false;
5641 aconnector
->base
.stereo_allowed
= false;
5642 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
5643 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
5644 aconnector
->audio_inst
= -1;
5645 mutex_init(&aconnector
->hpd_lock
);
5648 * configure support HPD hot plug connector_>polled default value is 0
5649 * which means HPD hot plug not supported
5651 switch (connector_type
) {
5652 case DRM_MODE_CONNECTOR_HDMIA
:
5653 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
5654 aconnector
->base
.ycbcr_420_allowed
=
5655 link
->link_enc
->features
.hdmi_ycbcr420_supported
? true : false;
5657 case DRM_MODE_CONNECTOR_DisplayPort
:
5658 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
5659 aconnector
->base
.ycbcr_420_allowed
=
5660 link
->link_enc
->features
.dp_ycbcr420_supported
? true : false;
5662 case DRM_MODE_CONNECTOR_DVID
:
5663 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
5669 drm_object_attach_property(&aconnector
->base
.base
,
5670 dm
->ddev
->mode_config
.scaling_mode_property
,
5671 DRM_MODE_SCALE_NONE
);
5673 drm_object_attach_property(&aconnector
->base
.base
,
5674 adev
->mode_info
.underscan_property
,
5676 drm_object_attach_property(&aconnector
->base
.base
,
5677 adev
->mode_info
.underscan_hborder_property
,
5679 drm_object_attach_property(&aconnector
->base
.base
,
5680 adev
->mode_info
.underscan_vborder_property
,
5683 drm_connector_attach_max_bpc_property(&aconnector
->base
, 8, 16);
5685 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5686 aconnector
->base
.state
->max_bpc
= (connector_type
== DRM_MODE_CONNECTOR_eDP
) ? 16 : 8;
5687 aconnector
->base
.state
->max_requested_bpc
= aconnector
->base
.state
->max_bpc
;
5689 if (connector_type
== DRM_MODE_CONNECTOR_eDP
&&
5690 dc_is_dmcu_initialized(adev
->dm
.dc
)) {
5691 drm_object_attach_property(&aconnector
->base
.base
,
5692 adev
->mode_info
.abm_level_property
, 0);
5695 if (connector_type
== DRM_MODE_CONNECTOR_HDMIA
||
5696 connector_type
== DRM_MODE_CONNECTOR_DisplayPort
||
5697 connector_type
== DRM_MODE_CONNECTOR_eDP
) {
5698 drm_object_attach_property(
5699 &aconnector
->base
.base
,
5700 dm
->ddev
->mode_config
.hdr_output_metadata_property
, 0);
5702 drm_connector_attach_vrr_capable_property(
5704 #ifdef CONFIG_DRM_AMD_DC_HDCP
5705 if (adev
->asic_type
>= CHIP_RAVEN
)
5706 drm_connector_attach_content_protection_property(&aconnector
->base
, true);
5711 static int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
5712 struct i2c_msg
*msgs
, int num
)
5714 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
5715 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
5716 struct i2c_command cmd
;
5720 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
5725 cmd
.number_of_payloads
= num
;
5726 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
5729 for (i
= 0; i
< num
; i
++) {
5730 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
5731 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
5732 cmd
.payloads
[i
].length
= msgs
[i
].len
;
5733 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
5737 ddc_service
->ctx
->dc
,
5738 ddc_service
->ddc_pin
->hw_info
.ddc_channel
,
5742 kfree(cmd
.payloads
);
5746 static u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
5748 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
5751 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
5752 .master_xfer
= amdgpu_dm_i2c_xfer
,
5753 .functionality
= amdgpu_dm_i2c_func
,
5756 static struct amdgpu_i2c_adapter
*
5757 create_i2c(struct ddc_service
*ddc_service
,
5761 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
5762 struct amdgpu_i2c_adapter
*i2c
;
5764 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
5767 i2c
->base
.owner
= THIS_MODULE
;
5768 i2c
->base
.class = I2C_CLASS_DDC
;
5769 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
5770 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
5771 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
5772 i2c_set_adapdata(&i2c
->base
, i2c
);
5773 i2c
->ddc_service
= ddc_service
;
5774 i2c
->ddc_service
->ddc_pin
->hw_info
.ddc_channel
= link_index
;
5781 * Note: this function assumes that dc_link_detect() was called for the
5782 * dc_link which will be represented by this aconnector.
5784 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
5785 struct amdgpu_dm_connector
*aconnector
,
5786 uint32_t link_index
,
5787 struct amdgpu_encoder
*aencoder
)
5791 struct dc
*dc
= dm
->dc
;
5792 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
5793 struct amdgpu_i2c_adapter
*i2c
;
5795 link
->priv
= aconnector
;
5797 DRM_DEBUG_DRIVER("%s()\n", __func__
);
5799 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
5801 DRM_ERROR("Failed to create i2c adapter data\n");
5805 aconnector
->i2c
= i2c
;
5806 res
= i2c_add_adapter(&i2c
->base
);
5809 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
5813 connector_type
= to_drm_connector_type(link
->connector_signal
);
5815 res
= drm_connector_init_with_ddc(
5818 &amdgpu_dm_connector_funcs
,
5823 DRM_ERROR("connector_init failed\n");
5824 aconnector
->connector_id
= -1;
5828 drm_connector_helper_add(
5830 &amdgpu_dm_connector_helper_funcs
);
5832 amdgpu_dm_connector_init_helper(
5839 drm_connector_attach_encoder(
5840 &aconnector
->base
, &aencoder
->base
);
5842 drm_connector_register(&aconnector
->base
);
5843 #if defined(CONFIG_DEBUG_FS)
5844 connector_debugfs_init(aconnector
);
5845 aconnector
->debugfs_dpcd_address
= 0;
5846 aconnector
->debugfs_dpcd_size
= 0;
5849 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
5850 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
5851 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
5856 aconnector
->i2c
= NULL
;
5861 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
5863 switch (adev
->mode_info
.num_crtc
) {
5880 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
5881 struct amdgpu_encoder
*aencoder
,
5882 uint32_t link_index
)
5884 struct amdgpu_device
*adev
= dev
->dev_private
;
5886 int res
= drm_encoder_init(dev
,
5888 &amdgpu_dm_encoder_funcs
,
5889 DRM_MODE_ENCODER_TMDS
,
5892 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
5895 aencoder
->encoder_id
= link_index
;
5897 aencoder
->encoder_id
= -1;
5899 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
5904 static void manage_dm_interrupts(struct amdgpu_device
*adev
,
5905 struct amdgpu_crtc
*acrtc
,
5909 * this is not correct translation but will work as soon as VBLANK
5910 * constant is the same as PFLIP
5913 amdgpu_display_crtc_idx_to_irq_type(
5918 drm_crtc_vblank_on(&acrtc
->base
);
5921 &adev
->pageflip_irq
,
5927 &adev
->pageflip_irq
,
5929 drm_crtc_vblank_off(&acrtc
->base
);
5934 is_scaling_state_different(const struct dm_connector_state
*dm_state
,
5935 const struct dm_connector_state
*old_dm_state
)
5937 if (dm_state
->scaling
!= old_dm_state
->scaling
)
5939 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
5940 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
5942 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
5943 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
5945 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
5946 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the HDCP state machine needs to be (re)run for this
 * connector.  May rewrite state->content_protection in place to normalize
 * transient states (content-type change, CP re-enable, S3 restore).
 * Returns true when hdcp_update_display() should be called.
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* Content type changed while CP is wanted: restart from DESIRED. */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}
#endif
5993 static void remove_stream(struct amdgpu_device
*adev
,
5994 struct amdgpu_crtc
*acrtc
,
5995 struct dc_stream_state
*stream
)
5997 /* this is the update mode case */
5999 acrtc
->otg_inst
= -1;
6000 acrtc
->enabled
= false;
6003 static int get_cursor_position(struct drm_plane
*plane
, struct drm_crtc
*crtc
,
6004 struct dc_cursor_position
*position
)
6006 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
6008 int xorigin
= 0, yorigin
= 0;
6010 position
->enable
= false;
6014 if (!crtc
|| !plane
->state
->fb
)
6017 if ((plane
->state
->crtc_w
> amdgpu_crtc
->max_cursor_width
) ||
6018 (plane
->state
->crtc_h
> amdgpu_crtc
->max_cursor_height
)) {
6019 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6021 plane
->state
->crtc_w
,
6022 plane
->state
->crtc_h
);
6026 x
= plane
->state
->crtc_x
;
6027 y
= plane
->state
->crtc_y
;
6029 if (x
<= -amdgpu_crtc
->max_cursor_width
||
6030 y
<= -amdgpu_crtc
->max_cursor_height
)
6033 if (crtc
->primary
->state
) {
6034 /* avivo cursor are offset into the total surface */
6035 x
+= crtc
->primary
->state
->src_x
>> 16;
6036 y
+= crtc
->primary
->state
->src_y
>> 16;
6040 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
6044 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
6047 position
->enable
= true;
6050 position
->x_hotspot
= xorigin
;
6051 position
->y_hotspot
= yorigin
;
6056 static void handle_cursor_update(struct drm_plane
*plane
,
6057 struct drm_plane_state
*old_plane_state
)
6059 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
6060 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(plane
->state
->fb
);
6061 struct drm_crtc
*crtc
= afb
? plane
->state
->crtc
: old_plane_state
->crtc
;
6062 struct dm_crtc_state
*crtc_state
= crtc
? to_dm_crtc_state(crtc
->state
) : NULL
;
6063 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
6064 uint64_t address
= afb
? afb
->address
: 0;
6065 struct dc_cursor_position position
;
6066 struct dc_cursor_attributes attributes
;
6069 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
6072 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6074 amdgpu_crtc
->crtc_id
,
6075 plane
->state
->crtc_w
,
6076 plane
->state
->crtc_h
);
6078 ret
= get_cursor_position(plane
, crtc
, &position
);
6082 if (!position
.enable
) {
6083 /* turn off cursor */
6084 if (crtc_state
&& crtc_state
->stream
) {
6085 mutex_lock(&adev
->dm
.dc_lock
);
6086 dc_stream_set_cursor_position(crtc_state
->stream
,
6088 mutex_unlock(&adev
->dm
.dc_lock
);
6093 amdgpu_crtc
->cursor_width
= plane
->state
->crtc_w
;
6094 amdgpu_crtc
->cursor_height
= plane
->state
->crtc_h
;
6096 memset(&attributes
, 0, sizeof(attributes
));
6097 attributes
.address
.high_part
= upper_32_bits(address
);
6098 attributes
.address
.low_part
= lower_32_bits(address
);
6099 attributes
.width
= plane
->state
->crtc_w
;
6100 attributes
.height
= plane
->state
->crtc_h
;
6101 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
6102 attributes
.rotation_angle
= 0;
6103 attributes
.attribute_flags
.value
= 0;
6105 attributes
.pitch
= attributes
.width
;
6107 if (crtc_state
->stream
) {
6108 mutex_lock(&adev
->dm
.dc_lock
);
6109 if (!dc_stream_set_cursor_attributes(crtc_state
->stream
,
6111 DRM_ERROR("DC failed to set cursor attributes\n");
6113 if (!dc_stream_set_cursor_position(crtc_state
->stream
,
6115 DRM_ERROR("DC failed to set cursor position\n");
6116 mutex_unlock(&adev
->dm
.dc_lock
);
6120 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
6123 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
6124 WARN_ON(acrtc
->event
);
6126 acrtc
->event
= acrtc
->base
.state
->event
;
6128 /* Set the flip status */
6129 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
6131 /* Mark this event as consumed */
6132 acrtc
->base
.state
->event
= NULL
;
6134 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6138 static void update_freesync_state_on_stream(
6139 struct amdgpu_display_manager
*dm
,
6140 struct dm_crtc_state
*new_crtc_state
,
6141 struct dc_stream_state
*new_stream
,
6142 struct dc_plane_state
*surface
,
6143 u32 flip_timestamp_in_us
)
6145 struct mod_vrr_params vrr_params
;
6146 struct dc_info_packet vrr_infopacket
= {0};
6147 struct amdgpu_device
*adev
= dm
->adev
;
6148 unsigned long flags
;
6154 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6155 * For now it's sufficient to just guard against these conditions.
6158 if (!new_stream
->timing
.h_total
|| !new_stream
->timing
.v_total
)
6161 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
6162 vrr_params
= new_crtc_state
->vrr_params
;
6165 mod_freesync_handle_preflip(
6166 dm
->freesync_module
,
6169 flip_timestamp_in_us
,
6172 if (adev
->family
< AMDGPU_FAMILY_AI
&&
6173 amdgpu_dm_vrr_active(new_crtc_state
)) {
6174 mod_freesync_handle_v_update(dm
->freesync_module
,
6175 new_stream
, &vrr_params
);
6177 /* Need to call this before the frame ends. */
6178 dc_stream_adjust_vmin_vmax(dm
->dc
,
6179 new_crtc_state
->stream
,
6180 &vrr_params
.adjust
);
6184 mod_freesync_build_vrr_infopacket(
6185 dm
->freesync_module
,
6189 TRANSFER_FUNC_UNKNOWN
,
6192 new_crtc_state
->freesync_timing_changed
|=
6193 (memcmp(&new_crtc_state
->vrr_params
.adjust
,
6195 sizeof(vrr_params
.adjust
)) != 0);
6197 new_crtc_state
->freesync_vrr_info_changed
|=
6198 (memcmp(&new_crtc_state
->vrr_infopacket
,
6200 sizeof(vrr_infopacket
)) != 0);
6202 new_crtc_state
->vrr_params
= vrr_params
;
6203 new_crtc_state
->vrr_infopacket
= vrr_infopacket
;
6205 new_stream
->adjust
= new_crtc_state
->vrr_params
.adjust
;
6206 new_stream
->vrr_infopacket
= vrr_infopacket
;
6208 if (new_crtc_state
->freesync_vrr_info_changed
)
6209 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6210 new_crtc_state
->base
.crtc
->base
.id
,
6211 (int)new_crtc_state
->base
.vrr_enabled
,
6212 (int)vrr_params
.state
);
6214 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
6217 static void pre_update_freesync_state_on_stream(
6218 struct amdgpu_display_manager
*dm
,
6219 struct dm_crtc_state
*new_crtc_state
)
6221 struct dc_stream_state
*new_stream
= new_crtc_state
->stream
;
6222 struct mod_vrr_params vrr_params
;
6223 struct mod_freesync_config config
= new_crtc_state
->freesync_config
;
6224 struct amdgpu_device
*adev
= dm
->adev
;
6225 unsigned long flags
;
6231 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6232 * For now it's sufficient to just guard against these conditions.
6234 if (!new_stream
->timing
.h_total
|| !new_stream
->timing
.v_total
)
6237 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
6238 vrr_params
= new_crtc_state
->vrr_params
;
6240 if (new_crtc_state
->vrr_supported
&&
6241 config
.min_refresh_in_uhz
&&
6242 config
.max_refresh_in_uhz
) {
6243 config
.state
= new_crtc_state
->base
.vrr_enabled
?
6244 VRR_STATE_ACTIVE_VARIABLE
:
6247 config
.state
= VRR_STATE_UNSUPPORTED
;
6250 mod_freesync_build_vrr_params(dm
->freesync_module
,
6252 &config
, &vrr_params
);
6254 new_crtc_state
->freesync_timing_changed
|=
6255 (memcmp(&new_crtc_state
->vrr_params
.adjust
,
6257 sizeof(vrr_params
.adjust
)) != 0);
6259 new_crtc_state
->vrr_params
= vrr_params
;
6260 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
6263 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state
*old_state
,
6264 struct dm_crtc_state
*new_state
)
6266 bool old_vrr_active
= amdgpu_dm_vrr_active(old_state
);
6267 bool new_vrr_active
= amdgpu_dm_vrr_active(new_state
);
6269 if (!old_vrr_active
&& new_vrr_active
) {
6270 /* Transition VRR inactive -> active:
6271 * While VRR is active, we must not disable vblank irq, as a
6272 * reenable after disable would compute bogus vblank/pflip
6273 * timestamps if it likely happened inside display front-porch.
6275 * We also need vupdate irq for the actual core vblank handling
6278 dm_set_vupdate_irq(new_state
->base
.crtc
, true);
6279 drm_crtc_vblank_get(new_state
->base
.crtc
);
6280 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6281 __func__
, new_state
->base
.crtc
->base
.id
);
6282 } else if (old_vrr_active
&& !new_vrr_active
) {
6283 /* Transition VRR active -> inactive:
6284 * Allow vblank irq disable again for fixed refresh rate.
6286 dm_set_vupdate_irq(new_state
->base
.crtc
, false);
6287 drm_crtc_vblank_put(new_state
->base
.crtc
);
6288 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6289 __func__
, new_state
->base
.crtc
->base
.id
);
6293 static void amdgpu_dm_commit_cursors(struct drm_atomic_state
*state
)
6295 struct drm_plane
*plane
;
6296 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
6300 * TODO: Make this per-stream so we don't issue redundant updates for
6301 * commits with multiple streams.
6303 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
,
6305 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
6306 handle_cursor_update(plane
, old_plane_state
);
6309 static void amdgpu_dm_commit_planes(struct drm_atomic_state
*state
,
6310 struct dc_state
*dc_state
,
6311 struct drm_device
*dev
,
6312 struct amdgpu_display_manager
*dm
,
6313 struct drm_crtc
*pcrtc
,
6314 bool wait_for_vblank
)
6317 uint64_t timestamp_ns
;
6318 struct drm_plane
*plane
;
6319 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
6320 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
6321 struct drm_crtc_state
*new_pcrtc_state
=
6322 drm_atomic_get_new_crtc_state(state
, pcrtc
);
6323 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(new_pcrtc_state
);
6324 struct dm_crtc_state
*dm_old_crtc_state
=
6325 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state
, pcrtc
));
6326 int planes_count
= 0, vpos
, hpos
;
6328 unsigned long flags
;
6329 struct amdgpu_bo
*abo
;
6330 uint64_t tiling_flags
;
6331 uint32_t target_vblank
, last_flip_vblank
;
6332 bool vrr_active
= amdgpu_dm_vrr_active(acrtc_state
);
6333 bool pflip_present
= false;
6334 bool swizzle
= true;
6336 struct dc_surface_update surface_updates
[MAX_SURFACES
];
6337 struct dc_plane_info plane_infos
[MAX_SURFACES
];
6338 struct dc_scaling_info scaling_infos
[MAX_SURFACES
];
6339 struct dc_flip_addrs flip_addrs
[MAX_SURFACES
];
6340 struct dc_stream_update stream_update
;
6343 bundle
= kzalloc(sizeof(*bundle
), GFP_KERNEL
);
6346 dm_error("Failed to allocate update bundle\n");
6351 * Disable the cursor first if we're disabling all the planes.
6352 * It'll remain on the screen after the planes are re-enabled
6355 if (acrtc_state
->active_planes
== 0)
6356 amdgpu_dm_commit_cursors(state
);
6358 /* update planes when needed */
6359 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
6360 struct drm_crtc
*crtc
= new_plane_state
->crtc
;
6361 struct drm_crtc_state
*new_crtc_state
;
6362 struct drm_framebuffer
*fb
= new_plane_state
->fb
;
6363 bool plane_needs_flip
;
6364 struct dc_plane_state
*dc_plane
;
6365 struct dm_plane_state
*dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
6367 /* Cursor plane is handled after stream updates */
6368 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
6371 if (!fb
|| !crtc
|| pcrtc
!= crtc
)
6374 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, crtc
);
6375 if (!new_crtc_state
->active
)
6378 dc_plane
= dm_new_plane_state
->dc_state
;
6380 if (dc_plane
&& !dc_plane
->tiling_info
.gfx9
.swizzle
)
6383 bundle
->surface_updates
[planes_count
].surface
= dc_plane
;
6384 if (new_pcrtc_state
->color_mgmt_changed
) {
6385 bundle
->surface_updates
[planes_count
].gamma
= dc_plane
->gamma_correction
;
6386 bundle
->surface_updates
[planes_count
].in_transfer_func
= dc_plane
->in_transfer_func
;
6389 fill_dc_scaling_info(new_plane_state
,
6390 &bundle
->scaling_infos
[planes_count
]);
6392 bundle
->surface_updates
[planes_count
].scaling_info
=
6393 &bundle
->scaling_infos
[planes_count
];
6395 plane_needs_flip
= old_plane_state
->fb
&& new_plane_state
->fb
;
6397 pflip_present
= pflip_present
|| plane_needs_flip
;
6399 if (!plane_needs_flip
) {
6404 abo
= gem_to_amdgpu_bo(fb
->obj
[0]);
6407 * Wait for all fences on this FB. Do limited wait to avoid
6408 * deadlock during GPU reset when this fence will not signal
6409 * but we hold reservation lock for the BO.
6411 r
= dma_resv_wait_timeout_rcu(abo
->tbo
.base
.resv
, true,
6413 msecs_to_jiffies(5000));
6414 if (unlikely(r
<= 0))
6415 DRM_ERROR("Waiting for fences timed out!");
6418 * TODO This might fail and hence better not used, wait
6419 * explicitly on fences instead
6420 * and in general should be called for
6421 * blocking commit to as per framework helpers
6423 r
= amdgpu_bo_reserve(abo
, true);
6424 if (unlikely(r
!= 0))
6425 DRM_ERROR("failed to reserve buffer before flip\n");
6427 amdgpu_bo_get_tiling_flags(abo
, &tiling_flags
);
6429 amdgpu_bo_unreserve(abo
);
6431 fill_dc_plane_info_and_addr(
6432 dm
->adev
, new_plane_state
, tiling_flags
,
6433 &bundle
->plane_infos
[planes_count
],
6434 &bundle
->flip_addrs
[planes_count
].address
);
6436 bundle
->surface_updates
[planes_count
].plane_info
=
6437 &bundle
->plane_infos
[planes_count
];
6440 * Only allow immediate flips for fast updates that don't
6441 * change FB pitch, DCC state, rotation or mirroing.
6443 bundle
->flip_addrs
[planes_count
].flip_immediate
=
6444 crtc
->state
->async_flip
&&
6445 acrtc_state
->update_type
== UPDATE_TYPE_FAST
;
6447 timestamp_ns
= ktime_get_ns();
6448 bundle
->flip_addrs
[planes_count
].flip_timestamp_in_us
= div_u64(timestamp_ns
, 1000);
6449 bundle
->surface_updates
[planes_count
].flip_addr
= &bundle
->flip_addrs
[planes_count
];
6450 bundle
->surface_updates
[planes_count
].surface
= dc_plane
;
6452 if (!bundle
->surface_updates
[planes_count
].surface
) {
6453 DRM_ERROR("No surface for CRTC: id=%d\n",
6454 acrtc_attach
->crtc_id
);
6458 if (plane
== pcrtc
->primary
)
6459 update_freesync_state_on_stream(
6462 acrtc_state
->stream
,
6464 bundle
->flip_addrs
[planes_count
].flip_timestamp_in_us
);
6466 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6468 bundle
->flip_addrs
[planes_count
].address
.grph
.addr
.high_part
,
6469 bundle
->flip_addrs
[planes_count
].address
.grph
.addr
.low_part
);
6475 if (pflip_present
) {
6477 /* Use old throttling in non-vrr fixed refresh rate mode
6478 * to keep flip scheduling based on target vblank counts
6479 * working in a backwards compatible way, e.g., for
6480 * clients using the GLX_OML_sync_control extension or
6481 * DRI3/Present extension with defined target_msc.
6483 last_flip_vblank
= amdgpu_get_vblank_counter_kms(dm
->ddev
, acrtc_attach
->crtc_id
);
6486 /* For variable refresh rate mode only:
6487 * Get vblank of last completed flip to avoid > 1 vrr
6488 * flips per video frame by use of throttling, but allow
6489 * flip programming anywhere in the possibly large
6490 * variable vrr vblank interval for fine-grained flip
6491 * timing control and more opportunity to avoid stutter
6492 * on late submission of flips.
6494 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
6495 last_flip_vblank
= acrtc_attach
->last_flip_vblank
;
6496 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
6499 target_vblank
= last_flip_vblank
+ wait_for_vblank
;
6502 * Wait until we're out of the vertical blank period before the one
6503 * targeted by the flip
6505 while ((acrtc_attach
->enabled
&&
6506 (amdgpu_display_get_crtc_scanoutpos(dm
->ddev
, acrtc_attach
->crtc_id
,
6507 0, &vpos
, &hpos
, NULL
,
6508 NULL
, &pcrtc
->hwmode
)
6509 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
6510 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
6511 (int)(target_vblank
-
6512 amdgpu_get_vblank_counter_kms(dm
->ddev
, acrtc_attach
->crtc_id
)) > 0)) {
6513 usleep_range(1000, 1100);
6516 if (acrtc_attach
->base
.state
->event
) {
6517 drm_crtc_vblank_get(pcrtc
);
6519 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
6521 WARN_ON(acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
);
6522 prepare_flip_isr(acrtc_attach
);
6524 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
6527 if (acrtc_state
->stream
) {
6528 if (acrtc_state
->freesync_vrr_info_changed
)
6529 bundle
->stream_update
.vrr_infopacket
=
6530 &acrtc_state
->stream
->vrr_infopacket
;
6534 /* Update the planes if changed or disable if we don't have any. */
6535 if ((planes_count
|| acrtc_state
->active_planes
== 0) &&
6536 acrtc_state
->stream
) {
6537 bundle
->stream_update
.stream
= acrtc_state
->stream
;
6538 if (new_pcrtc_state
->mode_changed
) {
6539 bundle
->stream_update
.src
= acrtc_state
->stream
->src
;
6540 bundle
->stream_update
.dst
= acrtc_state
->stream
->dst
;
6543 if (new_pcrtc_state
->color_mgmt_changed
) {
6545 * TODO: This isn't fully correct since we've actually
6546 * already modified the stream in place.
6548 bundle
->stream_update
.gamut_remap
=
6549 &acrtc_state
->stream
->gamut_remap_matrix
;
6550 bundle
->stream_update
.output_csc_transform
=
6551 &acrtc_state
->stream
->csc_color_matrix
;
6552 bundle
->stream_update
.out_transfer_func
=
6553 acrtc_state
->stream
->out_transfer_func
;
6556 acrtc_state
->stream
->abm_level
= acrtc_state
->abm_level
;
6557 if (acrtc_state
->abm_level
!= dm_old_crtc_state
->abm_level
)
6558 bundle
->stream_update
.abm_level
= &acrtc_state
->abm_level
;
6561 * If FreeSync state on the stream has changed then we need to
6562 * re-adjust the min/max bounds now that DC doesn't handle this
6563 * as part of commit.
6565 if (amdgpu_dm_vrr_active(dm_old_crtc_state
) !=
6566 amdgpu_dm_vrr_active(acrtc_state
)) {
6567 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
6568 dc_stream_adjust_vmin_vmax(
6569 dm
->dc
, acrtc_state
->stream
,
6570 &acrtc_state
->vrr_params
.adjust
);
6571 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
6573 mutex_lock(&dm
->dc_lock
);
6574 if ((acrtc_state
->update_type
> UPDATE_TYPE_FAST
) &&
6575 acrtc_state
->stream
->link
->psr_allow_active
)
6576 amdgpu_dm_psr_disable(acrtc_state
->stream
);
6578 dc_commit_updates_for_stream(dm
->dc
,
6579 bundle
->surface_updates
,
6581 acrtc_state
->stream
,
6582 &bundle
->stream_update
,
6585 if ((acrtc_state
->update_type
> UPDATE_TYPE_FAST
) &&
6586 acrtc_state
->stream
->psr_version
&&
6587 !acrtc_state
->stream
->link
->psr_feature_enabled
)
6588 amdgpu_dm_link_setup_psr(acrtc_state
->stream
);
6589 else if ((acrtc_state
->update_type
== UPDATE_TYPE_FAST
) &&
6590 acrtc_state
->stream
->link
->psr_feature_enabled
&&
6591 !acrtc_state
->stream
->link
->psr_allow_active
&&
6593 amdgpu_dm_psr_enable(acrtc_state
->stream
);
6596 mutex_unlock(&dm
->dc_lock
);
6600 * Update cursor state *after* programming all the planes.
6601 * This avoids redundant programming in the case where we're going
6602 * to be disabling a single plane - those pipes are being disabled.
6604 if (acrtc_state
->active_planes
)
6605 amdgpu_dm_commit_cursors(state
);
6611 static void amdgpu_dm_commit_audio(struct drm_device
*dev
,
6612 struct drm_atomic_state
*state
)
6614 struct amdgpu_device
*adev
= dev
->dev_private
;
6615 struct amdgpu_dm_connector
*aconnector
;
6616 struct drm_connector
*connector
;
6617 struct drm_connector_state
*old_con_state
, *new_con_state
;
6618 struct drm_crtc_state
*new_crtc_state
;
6619 struct dm_crtc_state
*new_dm_crtc_state
;
6620 const struct dc_stream_status
*status
;
6623 /* Notify device removals. */
6624 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
6625 if (old_con_state
->crtc
!= new_con_state
->crtc
) {
6626 /* CRTC changes require notification. */
6630 if (!new_con_state
->crtc
)
6633 new_crtc_state
= drm_atomic_get_new_crtc_state(
6634 state
, new_con_state
->crtc
);
6636 if (!new_crtc_state
)
6639 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
6643 aconnector
= to_amdgpu_dm_connector(connector
);
6645 mutex_lock(&adev
->dm
.audio_lock
);
6646 inst
= aconnector
->audio_inst
;
6647 aconnector
->audio_inst
= -1;
6648 mutex_unlock(&adev
->dm
.audio_lock
);
6650 amdgpu_dm_audio_eld_notify(adev
, inst
);
6653 /* Notify audio device additions. */
6654 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
6655 if (!new_con_state
->crtc
)
6658 new_crtc_state
= drm_atomic_get_new_crtc_state(
6659 state
, new_con_state
->crtc
);
6661 if (!new_crtc_state
)
6664 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
6667 new_dm_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6668 if (!new_dm_crtc_state
->stream
)
6671 status
= dc_stream_get_status(new_dm_crtc_state
->stream
);
6675 aconnector
= to_amdgpu_dm_connector(connector
);
6677 mutex_lock(&adev
->dm
.audio_lock
);
6678 inst
= status
->audio_inst
;
6679 aconnector
->audio_inst
= inst
;
6680 mutex_unlock(&adev
->dm
.audio_lock
);
6682 amdgpu_dm_audio_eld_notify(adev
, inst
);
6687 * Enable interrupts on CRTCs that are newly active, undergone
6688 * a modeset, or have active planes again.
6690 * Done in two passes, based on the for_modeset flag:
6691 * Pass 1: For CRTCs going through modeset
6692 * Pass 2: For CRTCs going from 0 to n active planes
6694 * Interrupts can only be enabled after the planes are programmed,
6695 * so this requires a two-pass approach since we don't want to
6696 * just defer the interrupts until after commit planes every time.
6698 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device
*dev
,
6699 struct drm_atomic_state
*state
,
6702 struct amdgpu_device
*adev
= dev
->dev_private
;
6703 struct drm_crtc
*crtc
;
6704 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
6706 #ifdef CONFIG_DEBUG_FS
6707 enum amdgpu_dm_pipe_crc_source source
;
6710 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
6711 new_crtc_state
, i
) {
6712 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
6713 struct dm_crtc_state
*dm_new_crtc_state
=
6714 to_dm_crtc_state(new_crtc_state
);
6715 struct dm_crtc_state
*dm_old_crtc_state
=
6716 to_dm_crtc_state(old_crtc_state
);
6717 bool modeset
= drm_atomic_crtc_needs_modeset(new_crtc_state
);
6720 run_pass
= (for_modeset
&& modeset
) ||
6721 (!for_modeset
&& !modeset
&&
6722 !dm_old_crtc_state
->interrupts_enabled
);
6727 if (!dm_new_crtc_state
->interrupts_enabled
)
6730 manage_dm_interrupts(adev
, acrtc
, true);
6732 #ifdef CONFIG_DEBUG_FS
6733 /* The stream has changed so CRC capture needs to re-enabled. */
6734 source
= dm_new_crtc_state
->crc_src
;
6735 if (amdgpu_dm_is_valid_crc_source(source
)) {
6736 amdgpu_dm_crtc_configure_crc_source(
6737 crtc
, dm_new_crtc_state
,
6738 dm_new_crtc_state
->crc_src
);
6745 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6746 * @crtc_state: the DRM CRTC state
6747 * @stream_state: the DC stream state.
6749 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6750 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6752 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state
*crtc_state
,
6753 struct dc_stream_state
*stream_state
)
6755 stream_state
->mode_changed
= drm_atomic_crtc_needs_modeset(crtc_state
);
6758 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
6759 struct drm_atomic_state
*state
,
6762 struct drm_crtc
*crtc
;
6763 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
6764 struct amdgpu_device
*adev
= dev
->dev_private
;
6768 * We evade vblank and pflip interrupts on CRTCs that are undergoing
6769 * a modeset, being disabled, or have no active planes.
6771 * It's done in atomic commit rather than commit tail for now since
6772 * some of these interrupt handlers access the current CRTC state and
6773 * potentially the stream pointer itself.
6775 * Since the atomic state is swapped within atomic commit and not within
6776 * commit tail this would leave to new state (that hasn't been committed yet)
6777 * being accesssed from within the handlers.
6779 * TODO: Fix this so we can do this in commit tail and not have to block
6782 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
6783 struct dm_crtc_state
*dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
6784 struct dm_crtc_state
*dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6785 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
6787 if (dm_old_crtc_state
->interrupts_enabled
&&
6788 (!dm_new_crtc_state
->interrupts_enabled
||
6789 drm_atomic_crtc_needs_modeset(new_crtc_state
)))
6790 manage_dm_interrupts(adev
, acrtc
, false);
6793 * Add check here for SoC's that support hardware cursor plane, to
6794 * unset legacy_cursor_update
6797 return drm_atomic_helper_commit(dev
, state
, nonblock
);
6799 /*TODO Handle EINTR, reenable IRQ*/
6803 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
6804 * @state: The atomic state to commit
6806 * This will tell DC to commit the constructed DC state from atomic_check,
6807 * programming the hardware. Any failures here implies a hardware failure, since
6808 * atomic check should have filtered anything non-kosher.
6810 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
)
6812 struct drm_device
*dev
= state
->dev
;
6813 struct amdgpu_device
*adev
= dev
->dev_private
;
6814 struct amdgpu_display_manager
*dm
= &adev
->dm
;
6815 struct dm_atomic_state
*dm_state
;
6816 struct dc_state
*dc_state
= NULL
, *dc_state_temp
= NULL
;
6818 struct drm_crtc
*crtc
;
6819 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
6820 unsigned long flags
;
6821 bool wait_for_vblank
= true;
6822 struct drm_connector
*connector
;
6823 struct drm_connector_state
*old_con_state
, *new_con_state
;
6824 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
6825 int crtc_disable_count
= 0;
6827 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
6829 dm_state
= dm_atomic_get_new_state(state
);
6830 if (dm_state
&& dm_state
->context
) {
6831 dc_state
= dm_state
->context
;
6833 /* No state changes, retain current state. */
6834 dc_state_temp
= dc_create_state(dm
->dc
);
6835 ASSERT(dc_state_temp
);
6836 dc_state
= dc_state_temp
;
6837 dc_resource_state_copy_construct_current(dm
->dc
, dc_state
);
6840 /* update changed items */
6841 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
6842 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
6844 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6845 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
6848 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
6849 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
6850 "connectors_changed:%d\n",
6852 new_crtc_state
->enable
,
6853 new_crtc_state
->active
,
6854 new_crtc_state
->planes_changed
,
6855 new_crtc_state
->mode_changed
,
6856 new_crtc_state
->active_changed
,
6857 new_crtc_state
->connectors_changed
);
6859 /* Copy all transient state flags into dc state */
6860 if (dm_new_crtc_state
->stream
) {
6861 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state
->base
,
6862 dm_new_crtc_state
->stream
);
6865 /* handles headless hotplug case, updating new_state and
6866 * aconnector as needed
6869 if (modeset_required(new_crtc_state
, dm_new_crtc_state
->stream
, dm_old_crtc_state
->stream
)) {
6871 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
6873 if (!dm_new_crtc_state
->stream
) {
6875 * this could happen because of issues with
6876 * userspace notifications delivery.
6877 * In this case userspace tries to set mode on
6878 * display which is disconnected in fact.
6879 * dc_sink is NULL in this case on aconnector.
6880 * We expect reset mode will come soon.
6882 * This can also happen when unplug is done
6883 * during resume sequence ended
6885 * In this case, we want to pretend we still
6886 * have a sink to keep the pipe running so that
6887 * hw state is consistent with the sw state
6889 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
6890 __func__
, acrtc
->base
.base
.id
);
6894 if (dm_old_crtc_state
->stream
)
6895 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
6897 pm_runtime_get_noresume(dev
->dev
);
6899 acrtc
->enabled
= true;
6900 acrtc
->hw_mode
= new_crtc_state
->mode
;
6901 crtc
->hwmode
= new_crtc_state
->mode
;
6902 } else if (modereset_required(new_crtc_state
)) {
6903 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
6904 /* i.e. reset mode */
6905 if (dm_old_crtc_state
->stream
) {
6906 if (dm_old_crtc_state
->stream
->link
->psr_allow_active
)
6907 amdgpu_dm_psr_disable(dm_old_crtc_state
->stream
);
6909 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
6912 } /* for_each_crtc_in_state() */
6915 dm_enable_per_frame_crtc_master_sync(dc_state
);
6916 mutex_lock(&dm
->dc_lock
);
6917 WARN_ON(!dc_commit_state(dm
->dc
, dc_state
));
6918 mutex_unlock(&dm
->dc_lock
);
6921 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
6922 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
6924 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6926 if (dm_new_crtc_state
->stream
!= NULL
) {
6927 const struct dc_stream_status
*status
=
6928 dc_stream_get_status(dm_new_crtc_state
->stream
);
6931 status
= dc_stream_get_status_from_state(dc_state
,
6932 dm_new_crtc_state
->stream
);
6935 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state
->stream
, acrtc
);
6937 acrtc
->otg_inst
= status
->primary_otg_inst
;
6940 #ifdef CONFIG_DRM_AMD_DC_HDCP
6941 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
6942 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
6943 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
6944 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
6946 new_crtc_state
= NULL
;
6949 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
6951 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6953 if (dm_new_crtc_state
&& dm_new_crtc_state
->stream
== NULL
&&
6954 connector
->state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_ENABLED
) {
6955 hdcp_reset_display(adev
->dm
.hdcp_workqueue
, aconnector
->dc_link
->link_index
);
6956 new_con_state
->content_protection
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
6960 if (is_content_protection_different(new_con_state
, old_con_state
, connector
, adev
->dm
.hdcp_workqueue
))
6961 hdcp_update_display(
6962 adev
->dm
.hdcp_workqueue
, aconnector
->dc_link
->link_index
, aconnector
,
6963 new_con_state
->hdcp_content_type
,
6964 new_con_state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_DESIRED
? true
6969 /* Handle connector state changes */
6970 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
6971 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
6972 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
6973 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
6974 struct dc_surface_update dummy_updates
[MAX_SURFACES
];
6975 struct dc_stream_update stream_update
;
6976 struct dc_info_packet hdr_packet
;
6977 struct dc_stream_status
*status
= NULL
;
6978 bool abm_changed
, hdr_changed
, scaling_changed
;
6980 memset(&dummy_updates
, 0, sizeof(dummy_updates
));
6981 memset(&stream_update
, 0, sizeof(stream_update
));
6984 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
6985 old_crtc_state
= drm_atomic_get_old_crtc_state(state
, &acrtc
->base
);
6988 /* Skip any modesets/resets */
6989 if (!acrtc
|| drm_atomic_crtc_needs_modeset(new_crtc_state
))
6992 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6993 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
6995 scaling_changed
= is_scaling_state_different(dm_new_con_state
,
6998 abm_changed
= dm_new_crtc_state
->abm_level
!=
6999 dm_old_crtc_state
->abm_level
;
7002 is_hdr_metadata_different(old_con_state
, new_con_state
);
7004 if (!scaling_changed
&& !abm_changed
&& !hdr_changed
)
7007 stream_update
.stream
= dm_new_crtc_state
->stream
;
7008 if (scaling_changed
) {
7009 update_stream_scaling_settings(&dm_new_con_state
->base
.crtc
->mode
,
7010 dm_new_con_state
, dm_new_crtc_state
->stream
);
7012 stream_update
.src
= dm_new_crtc_state
->stream
->src
;
7013 stream_update
.dst
= dm_new_crtc_state
->stream
->dst
;
7017 dm_new_crtc_state
->stream
->abm_level
= dm_new_crtc_state
->abm_level
;
7019 stream_update
.abm_level
= &dm_new_crtc_state
->abm_level
;
7023 fill_hdr_info_packet(new_con_state
, &hdr_packet
);
7024 stream_update
.hdr_static_metadata
= &hdr_packet
;
7027 status
= dc_stream_get_status(dm_new_crtc_state
->stream
);
7029 WARN_ON(!status
->plane_count
);
7032 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7033 * Here we create an empty update on each plane.
7034 * To fix this, DC should permit updating only stream properties.
7036 for (j
= 0; j
< status
->plane_count
; j
++)
7037 dummy_updates
[j
].surface
= status
->plane_states
[0];
7040 mutex_lock(&dm
->dc_lock
);
7041 dc_commit_updates_for_stream(dm
->dc
,
7043 status
->plane_count
,
7044 dm_new_crtc_state
->stream
,
7047 mutex_unlock(&dm
->dc_lock
);
7050 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7051 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
7052 new_crtc_state
, i
) {
7053 if (old_crtc_state
->active
&& !new_crtc_state
->active
)
7054 crtc_disable_count
++;
7056 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7057 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
7059 /* Update freesync active state. */
7060 pre_update_freesync_state_on_stream(dm
, dm_new_crtc_state
);
7062 /* Handle vrr on->off / off->on transitions */
7063 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state
,
7067 /* Enable interrupts for CRTCs going through a modeset. */
7068 amdgpu_dm_enable_crtc_interrupts(dev
, state
, true);
7070 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
)
7071 if (new_crtc_state
->async_flip
)
7072 wait_for_vblank
= false;
7074 /* update planes when needed per crtc*/
7075 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
) {
7076 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7078 if (dm_new_crtc_state
->stream
)
7079 amdgpu_dm_commit_planes(state
, dc_state
, dev
,
7080 dm
, crtc
, wait_for_vblank
);
7083 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7084 amdgpu_dm_enable_crtc_interrupts(dev
, state
, false);
7086 /* Update audio instances for each connector. */
7087 amdgpu_dm_commit_audio(dev
, state
);
7090 * send vblank event on all events not handled in flip and
7091 * mark consumed event for drm_atomic_helper_commit_hw_done
7093 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
7094 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
7096 if (new_crtc_state
->event
)
7097 drm_send_event_locked(dev
, &new_crtc_state
->event
->base
);
7099 new_crtc_state
->event
= NULL
;
7101 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
7103 /* Signal HW programming completion */
7104 drm_atomic_helper_commit_hw_done(state
);
7106 if (wait_for_vblank
)
7107 drm_atomic_helper_wait_for_flip_done(dev
, state
);
7109 drm_atomic_helper_cleanup_planes(dev
, state
);
7112 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7113 * so we can put the GPU into runtime suspend if we're not driving any
7116 for (i
= 0; i
< crtc_disable_count
; i
++)
7117 pm_runtime_put_autosuspend(dev
->dev
);
7118 pm_runtime_mark_last_busy(dev
->dev
);
7121 dc_release_state(dc_state_temp
);
/*
 * Force an atomic commit that restores the connector's current display
 * configuration, for cases where userspace will not issue a modeset itself
 * (e.g. headless hotplug). Builds a minimal drm_atomic_state containing the
 * connector, its CRTC and the primary plane, marks the CRTC mode as changed
 * to force a full restore, and commits it.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	/* Caller is expected to hold the mode_config acquire context already. */
	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state*/
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

	/*
	 * NOTE(review): only the failure path drops the state reference here;
	 * presumably drm_atomic_commit() hands ownership to the commit machinery
	 * on success — confirm against the DRM atomic refcounting rules.
	 */
err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}
/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	/* Nothing to restore without a sink, connector state and an encoder. */
	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we can not rely on usermode call
	 * to turn on the display, so we do it here
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * Waits for completion of all non blocking commits.
 *
 * Returns 0 on success, or a negative error from the lock/wait calls
 * (a wait timeout is logged but not treated as an error).
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to aquire_ctx will
	 * ensure that when the framework release it the
	 * extra locks we are locking here will get released to
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/*
		 * Take a reference on the oldest pending commit (if any) under
		 * commit_lock so it cannot be freed while we wait on it.
		 */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	/* ret < 0 means interrupted; timeouts (ret == 0) are tolerated. */
	return ret < 0 ? ret : 0;
}
/*
 * Compute the FreeSync (VRR) configuration for a CRTC from the connector's
 * advertised capabilities and the new CRTC state, and store it in
 * new_crtc_state->freesync_config. VRR is supported only when the connector
 * reports freesync capability and the mode's vrefresh falls within the
 * connector's [min_vfreq, max_vfreq] range.
 */
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		/* MSA timing must be ignored so the sink follows variable timing. */
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
				VRR_STATE_INACTIVE;
		/* vfreq is in Hz; mod_freesync expects micro-Hz. */
		config.min_refresh_in_uhz =
				aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
				aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
	}

	new_crtc_state->freesync_config = config;
}
/*
 * Clear all FreeSync/VRR state on a CRTC: marks VRR unsupported and zeroes
 * the cached VRR parameters and infopacket. Used when the stream is removed
 * or the CRTC is reset.
 */
static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_params, 0,
	       sizeof(new_crtc_state->vrr_params));
	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
/*
 * Atomic-check helper: validate and apply CRTC-level changes into the DC
 * context. Called twice from atomic_check — once with enable == false to
 * remove streams for disabled/changed CRTCs, then with enable == true to add
 * streams for enabled/updated CRTCs. Sets *lock_and_validation_needed when
 * a full (global-lock) validation pass will be required.
 *
 * Returns 0 on success or a negative errno on failure. On failure any stream
 * created here is released.
 */
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		/* Build the candidate DC stream for the new mode/connector state. */
		new_stream = create_stream_for_sink(aconnector,
						    &new_crtc_state->mode,
						    dm_new_conn_state,
						    dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			/* Extra reference: the dm_crtc_state now owns one too. */
			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->enable &&
	      new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
/*
 * Decide whether a plane change requires tearing down and recreating all
 * planes on its stream (a "reset") rather than a fast update. Returns true
 * when the plane is added/removed, the CRTC needs a modeset or color-mgmt
 * reprogramming, or any sibling plane's CRTC assignment or fb format changed.
 */
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * enough to determine when we need to reset all the planes on
	 * the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* Only planes sharing this plane's CRTC are relevant. */
		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* TODO: Remove this once we can handle fast format changes. */
		if (old_other_state->fb && new_other_state->fb &&
		    old_other_state->fb->format != new_other_state->fb->format)
			return true;
	}

	return false;
}
/*
 * Atomic-check helper: validate and apply plane-level changes into the DC
 * context. Like dm_update_crtc_state(), this is called once with
 * enable == false to remove changed/disabled planes and once with
 * enable == true to add new/updated planes. Sets *lock_and_validation_needed
 * when a full validation pass is required.
 *
 * Cursor planes are skipped entirely (no DC plane is created for them).
 *
 * Returns 0 on success or a negative errno on failure; any DC plane state
 * created here is released on the failure paths before it is attached to
 * the stream.
 */
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	bool needs_reset;
	int ret = 0;


	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/*TODO Implement atomic check for cursor plane */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}


		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}


	return ret;
}
/*
 * Ask DC how heavyweight the pending commit is. Builds a dc_surface_update
 * array describing every non-cursor plane on each unchanged stream and runs
 * dc_check_update_surfaces_for_stream() on it, writing the resulting
 * surface_update_type (FAST/MED/FULL) to *out_type. Any stream change,
 * plane dc_state change, allocation failure, or a DC verdict above MED
 * forces UPDATE_TYPE_FULL.
 *
 * Returns 0 on success or a negative errno; *out_type is always written.
 *
 * NOTE(review): the plane loop stores pointers to loop-local scaling_info /
 * plane_info / flip_addr into updates[] — presumably safe because DC only
 * reads them inside dc_check_update_surfaces_for_stream() within the same
 * iteration; confirm they are not retained past the call.
 */
static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;

	struct dc_surface_update *updates;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);

	if (!updates) {
		DRM_ERROR("Failed to allocate plane updates\n");
		/* Set type to FULL to avoid crashing in DC*/
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dc_scaling_info scaling_info;
		struct dc_stream_update stream_update;

		memset(&stream_update, 0, sizeof(stream_update));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		/* A stream swap always needs a full update. */
		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info plane_info;
			struct dc_flip_addrs flip_addr;
			uint64_t tiling_flags;

			new_plane_crtc = new_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			/* A recreated DC plane state also forces a full update. */
			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			updates[num_plane].surface = new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				stream_update.dst = new_dm_crtc_state->stream->dst;
				stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				updates[num_plane].gamma =
						new_dm_plane_state->dc_state->gamma_correction;
				updates[num_plane].in_transfer_func =
						new_dm_plane_state->dc_state->in_transfer_func;
				stream_update.gamut_remap =
						&new_dm_crtc_state->stream->gamut_remap_matrix;
				stream_update.output_csc_transform =
						&new_dm_crtc_state->stream->csc_color_matrix;
				stream_update.out_transfer_func =
						new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   &scaling_info);
			if (ret)
				goto cleanup;

			updates[num_plane].scaling_info = &scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags);
				if (ret)
					goto cleanup;

				memset(&flip_addr, 0, sizeof(flip_addr));

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					&plane_info,
					&flip_addr.address);
				if (ret)
					goto cleanup;

				updates[num_plane].plane_info = &plane_info;
				updates[num_plane].flip_addr = &flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
								  &stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(updates);

	*out_type = update_type;
	return ret;
}
/*
 * For a CRTC undergoing a modeset, find its MST connector (if any) and pull
 * every CRTC sharing that connector's MST topology into the atomic state,
 * since DSC bandwidth recomputation can affect all of them.
 *
 * Returns 0 when the CRTC has no MST connector, otherwise the result of
 * drm_dp_mst_add_affected_dsc_crtcs().
 */
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		/* Only connectors that are part of an MST topology qualify. */
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
7925 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
7926 * @dev: The DRM device
7927 * @state: The atomic state to commit
7929 * Validate that the given atomic state is programmable by DC into hardware.
7930 * This involves constructing a &struct dc_state reflecting the new hardware
7931 * state we wish to commit, then querying DC to see if it is programmable. It's
7932 * important not to modify the existing DC state. Otherwise, atomic_check
7933 * may unexpectedly commit hardware changes.
7935 * When validating the DC state, it's important that the right locks are
7936 * acquired. For full updates case which removes/adds/updates streams on one
7937 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
7938 * that any such full update commit will wait for completion of any outstanding
7939 * flip using DRMs synchronization events. See
7940 * dm_determine_update_type_for_commit()
7942 * Note that DM adds the affected connectors for all CRTCs in state, when that
7943 * might not seem necessary. This is because DC stream creation requires the
7944 * DC sink, which is tied to the DRM connector state. Cleaning this up should
7945 * be possible but non-trivial - a possible TODO item.
7947 * Return: -Error code if validation failed.
7949 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
7950 struct drm_atomic_state
*state
)
7952 struct amdgpu_device
*adev
= dev
->dev_private
;
7953 struct dm_atomic_state
*dm_state
= NULL
;
7954 struct dc
*dc
= adev
->dm
.dc
;
7955 struct drm_connector
*connector
;
7956 struct drm_connector_state
*old_con_state
, *new_con_state
;
7957 struct drm_crtc
*crtc
;
7958 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
7959 struct drm_plane
*plane
;
7960 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
7961 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
7962 enum surface_update_type overall_update_type
= UPDATE_TYPE_FAST
;
7967 * This bool will be set for true for any modeset/reset
7968 * or plane update which implies non fast surface update.
7970 bool lock_and_validation_needed
= false;
7972 ret
= drm_atomic_helper_check_modeset(dev
, state
);
7976 if (adev
->asic_type
>= CHIP_NAVI10
) {
7977 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
7978 if (drm_atomic_crtc_needs_modeset(new_crtc_state
)) {
7979 ret
= add_affected_mst_dsc_crtcs(state
, crtc
);
7986 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
7987 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
) &&
7988 !new_crtc_state
->color_mgmt_changed
&&
7989 old_crtc_state
->vrr_enabled
== new_crtc_state
->vrr_enabled
)
7992 if (!new_crtc_state
->enable
)
7995 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
7999 ret
= drm_atomic_add_affected_planes(state
, crtc
);
8005 * Add all primary and overlay planes on the CRTC to the state
8006 * whenever a plane is enabled to maintain correct z-ordering
8007 * and to enable fast surface updates.
8009 drm_for_each_crtc(crtc
, dev
) {
8010 bool modified
= false;
8012 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
8013 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
8016 if (new_plane_state
->crtc
== crtc
||
8017 old_plane_state
->crtc
== crtc
) {
8026 drm_for_each_plane_mask(plane
, state
->dev
, crtc
->state
->plane_mask
) {
8027 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
8031 drm_atomic_get_plane_state(state
, plane
);
8033 if (IS_ERR(new_plane_state
)) {
8034 ret
= PTR_ERR(new_plane_state
);
8040 /* Remove exiting planes if they are modified */
8041 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
8042 ret
= dm_update_plane_state(dc
, state
, plane
,
8046 &lock_and_validation_needed
);
8051 /* Disable all crtcs which require disable */
8052 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8053 ret
= dm_update_crtc_state(&adev
->dm
, state
, crtc
,
8057 &lock_and_validation_needed
);
8062 /* Enable all crtcs which require enable */
8063 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8064 ret
= dm_update_crtc_state(&adev
->dm
, state
, crtc
,
8068 &lock_and_validation_needed
);
8073 /* Add new/modified planes */
8074 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
8075 ret
= dm_update_plane_state(dc
, state
, plane
,
8079 &lock_and_validation_needed
);
8084 /* Run this here since we want to validate the streams we created */
8085 ret
= drm_atomic_helper_check_planes(dev
, state
);
8089 if (state
->legacy_cursor_update
) {
8091 * This is a fast cursor update coming from the plane update
8092 * helper, check if it can be done asynchronously for better
8095 state
->async_update
=
8096 !drm_atomic_helper_async_check(dev
, state
);
8099 * Skip the remaining global validation if this is an async
8100 * update. Cursor updates can be done without affecting
8101 * state or bandwidth calcs and this avoids the performance
8102 * penalty of locking the private state object and
8103 * allocating a new dc_state.
8105 if (state
->async_update
)
8109 /* Check scaling and underscan changes*/
8110 /* TODO Removed scaling changes validation due to inability to commit
8111 * new stream into context w\o causing full reset. Need to
8112 * decide how to handle.
8114 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
8115 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
8116 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
8117 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
8119 /* Skip any modesets/resets */
8120 if (!acrtc
|| drm_atomic_crtc_needs_modeset(
8121 drm_atomic_get_new_crtc_state(state
, &acrtc
->base
)))
8124 /* Skip any thing not scale or underscan changes */
8125 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
8128 overall_update_type
= UPDATE_TYPE_FULL
;
8129 lock_and_validation_needed
= true;
8132 ret
= dm_determine_update_type_for_commit(&adev
->dm
, state
, &update_type
);
8136 if (overall_update_type
< update_type
)
8137 overall_update_type
= update_type
;
8140 * lock_and_validation_needed was an old way to determine if we need to set
8141 * the global lock. Leaving it in to check if we broke any corner cases
8142 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8143 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8145 if (lock_and_validation_needed
&& overall_update_type
<= UPDATE_TYPE_FAST
)
8146 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8148 if (overall_update_type
> UPDATE_TYPE_FAST
) {
8149 ret
= dm_atomic_get_state(state
, &dm_state
);
8153 ret
= do_aquire_global_lock(dev
, state
);
8157 #if defined(CONFIG_DRM_AMD_DC_DCN)
8158 if (!compute_mst_dsc_configs_for_state(state
, dm_state
->context
))
8161 ret
= dm_update_mst_vcpi_slots_for_dsc(state
, dm_state
->context
);
8166 if (dc_validate_global_state(dc
, dm_state
->context
, false) != DC_OK
) {
8172 * The commit is a fast update. Fast updates shouldn't change
8173 * the DC context, affect global validation, and can have their
8174 * commit work done in parallel with other commits not touching
8175 * the same resource. If we have a new DC context as part of
8176 * the DM atomic state from validation we need to free it and
8177 * retain the existing one instead.
8179 struct dm_atomic_state
*new_dm_state
, *old_dm_state
;
8181 new_dm_state
= dm_atomic_get_new_state(state
);
8182 old_dm_state
= dm_atomic_get_old_state(state
);
8184 if (new_dm_state
&& old_dm_state
) {
8185 if (new_dm_state
->context
)
8186 dc_release_state(new_dm_state
->context
);
8188 new_dm_state
->context
= old_dm_state
->context
;
8190 if (old_dm_state
->context
)
8191 dc_retain_state(old_dm_state
->context
);
8194 /* Perform validation of MST topology in the state*/
8195 ret
= drm_dp_mst_atomic_check(state
);
8199 /* Store the overall update type for use later in atomic check. */
8200 for_each_new_crtc_in_state (state
, crtc
, new_crtc_state
, i
) {
8201 struct dm_crtc_state
*dm_new_crtc_state
=
8202 to_dm_crtc_state(new_crtc_state
);
8204 dm_new_crtc_state
->update_type
= (int)overall_update_type
;
8207 /* Must be success */
8212 if (ret
== -EDEADLK
)
8213 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8214 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
8215 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8217 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret
);
8222 static bool is_dp_capable_without_timing_msa(struct dc
*dc
,
8223 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
8226 bool capable
= false;
8228 if (amdgpu_dm_connector
->dc_link
&&
8229 dm_helpers_dp_read_dpcd(
8231 amdgpu_dm_connector
->dc_link
,
8232 DP_DOWN_STREAM_PORT_COUNT
,
8234 sizeof(dpcd_data
))) {
8235 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
8240 void amdgpu_dm_update_freesync_caps(struct drm_connector
*connector
,
8244 bool edid_check_required
;
8245 struct detailed_timing
*timing
;
8246 struct detailed_non_pixel
*data
;
8247 struct detailed_data_monitor_range
*range
;
8248 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
8249 to_amdgpu_dm_connector(connector
);
8250 struct dm_connector_state
*dm_con_state
= NULL
;
8252 struct drm_device
*dev
= connector
->dev
;
8253 struct amdgpu_device
*adev
= dev
->dev_private
;
8254 bool freesync_capable
= false;
8256 if (!connector
->state
) {
8257 DRM_ERROR("%s - Connector has no state", __func__
);
8262 dm_con_state
= to_dm_connector_state(connector
->state
);
8264 amdgpu_dm_connector
->min_vfreq
= 0;
8265 amdgpu_dm_connector
->max_vfreq
= 0;
8266 amdgpu_dm_connector
->pixel_clock_mhz
= 0;
8271 dm_con_state
= to_dm_connector_state(connector
->state
);
8273 edid_check_required
= false;
8274 if (!amdgpu_dm_connector
->dc_sink
) {
8275 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8278 if (!adev
->dm
.freesync_module
)
8281 * if edid non zero restrict freesync only for dp and edp
8284 if (amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
8285 || amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
8286 edid_check_required
= is_dp_capable_without_timing_msa(
8288 amdgpu_dm_connector
);
8291 if (edid_check_required
== true && (edid
->version
> 1 ||
8292 (edid
->version
== 1 && edid
->revision
> 1))) {
8293 for (i
= 0; i
< 4; i
++) {
8295 timing
= &edid
->detailed_timings
[i
];
8296 data
= &timing
->data
.other_data
;
8297 range
= &data
->data
.range
;
8299 * Check if monitor has continuous frequency mode
8301 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
8304 * Check for flag range limits only. If flag == 1 then
8305 * no additional timing information provided.
8306 * Default GTF, GTF Secondary curve and CVT are not
8309 if (range
->flags
!= 1)
8312 amdgpu_dm_connector
->min_vfreq
= range
->min_vfreq
;
8313 amdgpu_dm_connector
->max_vfreq
= range
->max_vfreq
;
8314 amdgpu_dm_connector
->pixel_clock_mhz
=
8315 range
->pixel_clock_mhz
* 10;
8319 if (amdgpu_dm_connector
->max_vfreq
-
8320 amdgpu_dm_connector
->min_vfreq
> 10) {
8322 freesync_capable
= true;
8328 dm_con_state
->freesync_capable
= freesync_capable
;
8330 if (connector
->vrr_capable_property
)
8331 drm_connector_set_vrr_capable_property(connector
,
8335 static void amdgpu_dm_set_psr_caps(struct dc_link
*link
)
8337 uint8_t dpcd_data
[EDP_PSR_RECEIVER_CAP_SIZE
];
8339 if (!(link
->connector_signal
& SIGNAL_TYPE_EDP
))
8341 if (link
->type
== dc_connection_none
)
8343 if (dm_helpers_dp_read_dpcd(NULL
, link
, DP_PSR_SUPPORT
,
8344 dpcd_data
, sizeof(dpcd_data
))) {
8345 link
->psr_feature_enabled
= dpcd_data
[0] ? true:false;
8346 DRM_INFO("PSR support:%d\n", link
->psr_feature_enabled
);
8351 * amdgpu_dm_link_setup_psr() - configure psr link
8352 * @stream: stream state
8354 * Return: true if success
8356 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state
*stream
)
8358 struct dc_link
*link
= NULL
;
8359 struct psr_config psr_config
= {0};
8360 struct psr_context psr_context
= {0};
8361 struct dc
*dc
= NULL
;
8367 link
= stream
->link
;
8370 psr_config
.psr_version
= dc
->res_pool
->dmcu
->dmcu_version
.psr_version
;
8372 if (psr_config
.psr_version
> 0) {
8373 psr_config
.psr_exit_link_training_required
= 0x1;
8374 psr_config
.psr_frame_capture_indication_req
= 0;
8375 psr_config
.psr_rfb_setup_time
= 0x37;
8376 psr_config
.psr_sdp_transmit_line_num_deadline
= 0x20;
8377 psr_config
.allow_smu_optimizations
= 0x0;
8379 ret
= dc_link_setup_psr(link
, stream
, &psr_config
, &psr_context
);
8382 DRM_DEBUG_DRIVER("PSR link: %d\n", link
->psr_feature_enabled
);
8388 * amdgpu_dm_psr_enable() - enable psr f/w
8389 * @stream: stream state
8391 * Return: true if success
8393 bool amdgpu_dm_psr_enable(struct dc_stream_state
*stream
)
8395 struct dc_link
*link
= stream
->link
;
8396 unsigned int vsync_rate_hz
= 0;
8397 struct dc_static_screen_params params
= {0};
8398 /* Calculate number of static frames before generating interrupt to
8401 unsigned int frame_time_microsec
= 1000000 / vsync_rate_hz
;
8402 // Init fail safe of 2 frames static
8403 unsigned int num_frames_static
= 2;
8405 DRM_DEBUG_DRIVER("Enabling psr...\n");
8407 vsync_rate_hz
= div64_u64(div64_u64((
8408 stream
->timing
.pix_clk_100hz
* 100),
8409 stream
->timing
.v_total
),
8410 stream
->timing
.h_total
);
8413 * Calculate number of frames such that at least 30 ms of time has
8416 if (vsync_rate_hz
!= 0)
8417 num_frames_static
= (30000 / frame_time_microsec
) + 1;
8419 params
.triggers
.cursor_update
= true;
8420 params
.triggers
.overlay_update
= true;
8421 params
.triggers
.surface_update
= true;
8422 params
.num_frames
= num_frames_static
;
8424 dc_stream_set_static_screen_params(link
->ctx
->dc
,
8428 return dc_link_set_psr_allow_active(link
, true, false);
8432 * amdgpu_dm_psr_disable() - disable psr f/w
8433 * @stream: stream state
8435 * Return: true if success
8437 static bool amdgpu_dm_psr_disable(struct dc_stream_state
*stream
)
8440 DRM_DEBUG_DRIVER("Disabling psr...\n");
8442 return dc_link_set_psr_allow_active(stream
->link
, false, true);