1 // SPDX-License-Identifier: GPL-2.0
3 * Cadence MHDP8546 DP bridge driver.
5 * Copyright (C) 2020 Cadence Design Systems, Inc.
7 * Authors: Quentin Schulz <quentin.schulz@free-electrons.com>
8 * Swapnil Jakhade <sjakhade@cadence.com>
9 * Yuti Amonkar <yamonkar@cadence.com>
10 * Tomi Valkeinen <tomi.valkeinen@ti.com>
11 * Jyri Sarha <jsarha@ti.com>
14 * - Implement optimized mailbox communication using mailbox interrupts
15 * - Add support for power management
16 * - Add support for features like audio, MST and fast link training
17 * - Implement request_fw_cancel to handle HW_STATE
18 * - Fix asynchronous loading of firmware implementation
19 * - Add DRM helper function for cdns_mhdp_lower_link_rate
22 #include <linux/clk.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/firmware.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/media-bus-format.h>
30 #include <linux/module.h>
32 #include <linux/phy/phy.h>
33 #include <linux/phy/phy-dp.h>
34 #include <linux/platform_device.h>
35 #include <linux/slab.h>
36 #include <linux/wait.h>
38 #include <drm/display/drm_dp_helper.h>
39 #include <drm/display/drm_hdcp_helper.h>
40 #include <drm/drm_atomic.h>
41 #include <drm/drm_atomic_helper.h>
42 #include <drm/drm_atomic_state_helper.h>
43 #include <drm/drm_bridge.h>
44 #include <drm/drm_connector.h>
45 #include <drm/drm_edid.h>
46 #include <drm/drm_modeset_helper_vtables.h>
47 #include <drm/drm_print.h>
48 #include <drm/drm_probe_helper.h>
50 #include <linux/unaligned.h>
52 #include "cdns-mhdp8546-core.h"
53 #include "cdns-mhdp8546-hdcp.h"
54 #include "cdns-mhdp8546-j721e.h"
56 static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge
*bridge
)
58 struct cdns_mhdp_device
*mhdp
= bridge_to_mhdp(bridge
);
60 /* Enable SW event interrupts */
61 if (mhdp
->bridge_attached
)
62 writel(readl(mhdp
->regs
+ CDNS_APB_INT_MASK
) &
63 ~CDNS_APB_INT_MASK_SW_EVENT_INT
,
64 mhdp
->regs
+ CDNS_APB_INT_MASK
);
67 static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge
*bridge
)
69 struct cdns_mhdp_device
*mhdp
= bridge_to_mhdp(bridge
);
71 writel(readl(mhdp
->regs
+ CDNS_APB_INT_MASK
) |
72 CDNS_APB_INT_MASK_SW_EVENT_INT
,
73 mhdp
->regs
+ CDNS_APB_INT_MASK
);
76 static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device
*mhdp
)
80 WARN_ON(!mutex_is_locked(&mhdp
->mbox_mutex
));
82 ret
= readx_poll_timeout(readl
, mhdp
->regs
+ CDNS_MAILBOX_EMPTY
,
83 empty
, !empty
, MAILBOX_RETRY_US
,
88 return readl(mhdp
->regs
+ CDNS_MAILBOX_RX_DATA
) & 0xff;
91 static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device
*mhdp
, u8 val
)
95 WARN_ON(!mutex_is_locked(&mhdp
->mbox_mutex
));
97 ret
= readx_poll_timeout(readl
, mhdp
->regs
+ CDNS_MAILBOX_FULL
,
98 full
, !full
, MAILBOX_RETRY_US
,
103 writel(val
, mhdp
->regs
+ CDNS_MAILBOX_TX_DATA
);
108 static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device
*mhdp
,
109 u8 module_id
, u8 opcode
,
116 /* read the header of the message */
117 for (i
= 0; i
< sizeof(header
); i
++) {
118 ret
= cdns_mhdp_mailbox_read(mhdp
);
125 mbox_size
= get_unaligned_be16(header
+ 2);
127 if (opcode
!= header
[0] || module_id
!= header
[1] ||
128 req_size
!= mbox_size
) {
130 * If the message in mailbox is not what we want, we need to
131 * clear the mailbox by reading its contents.
133 for (i
= 0; i
< mbox_size
; i
++)
134 if (cdns_mhdp_mailbox_read(mhdp
) < 0)
143 static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device
*mhdp
,
144 u8
*buff
, u16 buff_size
)
149 for (i
= 0; i
< buff_size
; i
++) {
150 ret
= cdns_mhdp_mailbox_read(mhdp
);
160 static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device
*mhdp
, u8 module_id
,
161 u8 opcode
, u16 size
, u8
*message
)
167 header
[1] = module_id
;
168 put_unaligned_be16(size
, header
+ 2);
170 for (i
= 0; i
< sizeof(header
); i
++) {
171 ret
= cdns_mhdp_mailbox_write(mhdp
, header
[i
]);
176 for (i
= 0; i
< size
; i
++) {
177 ret
= cdns_mhdp_mailbox_write(mhdp
, message
[i
]);
186 int cdns_mhdp_reg_read(struct cdns_mhdp_device
*mhdp
, u32 addr
, u32
*value
)
191 put_unaligned_be32(addr
, msg
);
193 mutex_lock(&mhdp
->mbox_mutex
);
195 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_GENERAL
,
196 GENERAL_REGISTER_READ
,
201 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_GENERAL
,
202 GENERAL_REGISTER_READ
,
207 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, resp
, sizeof(resp
));
211 /* Returned address value should be the same as requested */
212 if (memcmp(msg
, resp
, sizeof(msg
))) {
217 *value
= get_unaligned_be32(resp
+ 4);
220 mutex_unlock(&mhdp
->mbox_mutex
);
222 dev_err(mhdp
->dev
, "Failed to read register\n");
230 int cdns_mhdp_reg_write(struct cdns_mhdp_device
*mhdp
, u16 addr
, u32 val
)
235 put_unaligned_be16(addr
, msg
);
236 put_unaligned_be32(val
, msg
+ 2);
238 mutex_lock(&mhdp
->mbox_mutex
);
240 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
241 DPTX_WRITE_REGISTER
, sizeof(msg
), msg
);
243 mutex_unlock(&mhdp
->mbox_mutex
);
249 int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device
*mhdp
, u16 addr
,
250 u8 start_bit
, u8 bits_no
, u32 val
)
255 put_unaligned_be16(addr
, field
);
256 field
[2] = start_bit
;
258 put_unaligned_be32(val
, field
+ 4);
260 mutex_lock(&mhdp
->mbox_mutex
);
262 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
263 DPTX_WRITE_FIELD
, sizeof(field
), field
);
265 mutex_unlock(&mhdp
->mbox_mutex
);
271 int cdns_mhdp_dpcd_read(struct cdns_mhdp_device
*mhdp
,
272 u32 addr
, u8
*data
, u16 len
)
277 put_unaligned_be16(len
, msg
);
278 put_unaligned_be24(addr
, msg
+ 2);
280 mutex_lock(&mhdp
->mbox_mutex
);
282 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
283 DPTX_READ_DPCD
, sizeof(msg
), msg
);
287 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
293 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, reg
, sizeof(reg
));
297 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, data
, len
);
300 mutex_unlock(&mhdp
->mbox_mutex
);
306 int cdns_mhdp_dpcd_write(struct cdns_mhdp_device
*mhdp
, u32 addr
, u8 value
)
311 put_unaligned_be16(1, msg
);
312 put_unaligned_be24(addr
, msg
+ 2);
315 mutex_lock(&mhdp
->mbox_mutex
);
317 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
318 DPTX_WRITE_DPCD
, sizeof(msg
), msg
);
322 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
323 DPTX_WRITE_DPCD
, sizeof(reg
));
327 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, reg
, sizeof(reg
));
331 if (addr
!= get_unaligned_be24(reg
+ 2))
335 mutex_unlock(&mhdp
->mbox_mutex
);
338 dev_err(mhdp
->dev
, "dpcd write failed: %d\n", ret
);
343 int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device
*mhdp
, bool enable
)
348 msg
[0] = GENERAL_MAIN_CONTROL
;
349 msg
[1] = MB_MODULE_ID_GENERAL
;
352 msg
[4] = enable
? FW_ACTIVE
: FW_STANDBY
;
354 mutex_lock(&mhdp
->mbox_mutex
);
356 for (i
= 0; i
< sizeof(msg
); i
++) {
357 ret
= cdns_mhdp_mailbox_write(mhdp
, msg
[i
]);
362 /* read the firmware state */
363 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, msg
, sizeof(msg
));
370 mutex_unlock(&mhdp
->mbox_mutex
);
373 dev_err(mhdp
->dev
, "set firmware active failed\n");
378 int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device
*mhdp
)
383 mutex_lock(&mhdp
->mbox_mutex
);
385 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
386 DPTX_HPD_STATE
, 0, NULL
);
390 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
396 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, &status
, sizeof(status
));
400 mutex_unlock(&mhdp
->mbox_mutex
);
402 dev_dbg(mhdp
->dev
, "%s: HPD %splugged\n", __func__
,
408 mutex_unlock(&mhdp
->mbox_mutex
);
414 int cdns_mhdp_get_edid_block(void *data
, u8
*edid
,
415 unsigned int block
, size_t length
)
417 struct cdns_mhdp_device
*mhdp
= data
;
418 u8 msg
[2], reg
[2], i
;
421 mutex_lock(&mhdp
->mbox_mutex
);
423 for (i
= 0; i
< 4; i
++) {
427 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
428 DPTX_GET_EDID
, sizeof(msg
), msg
);
432 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
434 sizeof(reg
) + length
);
438 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, reg
, sizeof(reg
));
442 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, edid
, length
);
446 if (reg
[0] == length
&& reg
[1] == block
/ 2)
450 mutex_unlock(&mhdp
->mbox_mutex
);
453 dev_err(mhdp
->dev
, "get block[%d] edid failed: %d\n",
460 int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device
*mhdp
)
465 mutex_lock(&mhdp
->mbox_mutex
);
467 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
468 DPTX_READ_EVENT
, 0, NULL
);
472 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
473 DPTX_READ_EVENT
, sizeof(event
));
477 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, &event
, sizeof(event
));
479 mutex_unlock(&mhdp
->mbox_mutex
);
484 dev_dbg(mhdp
->dev
, "%s: %s%s%s%s\n", __func__
,
485 (event
& DPTX_READ_EVENT_HPD_TO_HIGH
) ? "TO_HIGH " : "",
486 (event
& DPTX_READ_EVENT_HPD_TO_LOW
) ? "TO_LOW " : "",
487 (event
& DPTX_READ_EVENT_HPD_PULSE
) ? "PULSE " : "",
488 (event
& DPTX_READ_EVENT_HPD_STATE
) ? "HPD_STATE " : "");
494 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device
*mhdp
, unsigned int nlanes
,
495 unsigned int udelay
, const u8
*lanes_data
,
496 u8 link_status
[DP_LINK_STATUS_SIZE
])
499 u8 hdr
[5]; /* For DPCD read response header */
503 if (nlanes
!= 4 && nlanes
!= 2 && nlanes
!= 1) {
504 dev_err(mhdp
->dev
, "invalid number of lanes: %u\n", nlanes
);
510 put_unaligned_be16(udelay
, payload
+ 1);
511 memcpy(payload
+ 3, lanes_data
, nlanes
);
513 mutex_lock(&mhdp
->mbox_mutex
);
515 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
517 sizeof(payload
), payload
);
521 /* Yes, read the DPCD read command response */
522 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
524 sizeof(hdr
) + DP_LINK_STATUS_SIZE
);
528 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, hdr
, sizeof(hdr
));
532 addr
= get_unaligned_be24(hdr
+ 2);
533 if (addr
!= DP_LANE0_1_STATUS
)
536 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, link_status
,
537 DP_LINK_STATUS_SIZE
);
540 mutex_unlock(&mhdp
->mbox_mutex
);
543 dev_err(mhdp
->dev
, "Failed to adjust Link Training.\n");
549 * cdns_mhdp_link_power_up() - power up a DisplayPort link
550 * @aux: DisplayPort AUX channel
551 * @link: pointer to a structure containing the link configuration
553 * Returns 0 on success or a negative error code on failure.
556 int cdns_mhdp_link_power_up(struct drm_dp_aux
*aux
, struct cdns_mhdp_link
*link
)
561 /* DP_SET_POWER register is only available on DPCD v1.1 and later */
562 if (link
->revision
< 0x11)
565 err
= drm_dp_dpcd_readb(aux
, DP_SET_POWER
, &value
);
569 value
&= ~DP_SET_POWER_MASK
;
570 value
|= DP_SET_POWER_D0
;
572 err
= drm_dp_dpcd_writeb(aux
, DP_SET_POWER
, value
);
577 * According to the DP 1.1 specification, a "Sink Device must exit the
578 * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
579 * Control Field" (register 0x600).
581 usleep_range(1000, 2000);
587 * cdns_mhdp_link_power_down() - power down a DisplayPort link
588 * @aux: DisplayPort AUX channel
589 * @link: pointer to a structure containing the link configuration
591 * Returns 0 on success or a negative error code on failure.
594 int cdns_mhdp_link_power_down(struct drm_dp_aux
*aux
,
595 struct cdns_mhdp_link
*link
)
600 /* DP_SET_POWER register is only available on DPCD v1.1 and later */
601 if (link
->revision
< 0x11)
604 err
= drm_dp_dpcd_readb(aux
, DP_SET_POWER
, &value
);
608 value
&= ~DP_SET_POWER_MASK
;
609 value
|= DP_SET_POWER_D3
;
611 err
= drm_dp_dpcd_writeb(aux
, DP_SET_POWER
, value
);
619 * cdns_mhdp_link_configure() - configure a DisplayPort link
620 * @aux: DisplayPort AUX channel
621 * @link: pointer to a structure containing the link configuration
623 * Returns 0 on success or a negative error code on failure.
626 int cdns_mhdp_link_configure(struct drm_dp_aux
*aux
,
627 struct cdns_mhdp_link
*link
)
632 values
[0] = drm_dp_link_rate_to_bw_code(link
->rate
);
633 values
[1] = link
->num_lanes
;
635 if (link
->capabilities
& DP_LINK_CAP_ENHANCED_FRAMING
)
636 values
[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN
;
638 err
= drm_dp_dpcd_write(aux
, DP_LINK_BW_SET
, values
, sizeof(values
));
645 static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device
*mhdp
)
647 return min(mhdp
->host
.link_rate
, mhdp
->sink
.link_rate
);
650 static u8
cdns_mhdp_max_num_lanes(struct cdns_mhdp_device
*mhdp
)
652 return min(mhdp
->sink
.lanes_cnt
, mhdp
->host
.lanes_cnt
);
655 static u8
cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device
*mhdp
)
657 return fls(mhdp
->host
.pattern_supp
& mhdp
->sink
.pattern_supp
);
660 static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device
*mhdp
)
662 /* Check if SSC is supported by both sides */
663 return mhdp
->host
.ssc
&& mhdp
->sink
.ssc
;
666 static enum drm_connector_status
cdns_mhdp_detect(struct cdns_mhdp_device
*mhdp
)
668 dev_dbg(mhdp
->dev
, "%s: %d\n", __func__
, mhdp
->plugged
);
671 return connector_status_connected
;
673 return connector_status_disconnected
;
676 static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device
*mhdp
)
678 u32 major_num
, minor_num
, revision
;
681 fw_ver
= (readl(mhdp
->regs
+ CDNS_VER_H
) << 8)
682 | readl(mhdp
->regs
+ CDNS_VER_L
);
684 lib_ver
= (readl(mhdp
->regs
+ CDNS_LIB_H_ADDR
) << 8)
685 | readl(mhdp
->regs
+ CDNS_LIB_L_ADDR
);
687 if (lib_ver
< 33984) {
689 * Older FW versions with major number 1, used to store FW
690 * version information by storing repository revision number
691 * in registers. This is for identifying these FW versions.
695 if (fw_ver
== 26098) {
697 } else if (lib_ver
== 0 && fw_ver
== 0) {
700 dev_err(mhdp
->dev
, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
705 /* To identify newer FW versions with major number 2 onwards. */
706 major_num
= fw_ver
/ 10000;
707 minor_num
= (fw_ver
/ 100) % 100;
708 revision
= (fw_ver
% 10000) % 100;
711 dev_dbg(mhdp
->dev
, "FW version: v%u.%u.%u\n", major_num
, minor_num
,
716 static int cdns_mhdp_fw_activate(const struct firmware
*fw
,
717 struct cdns_mhdp_device
*mhdp
)
722 /* Release uCPU reset and stall it. */
723 writel(CDNS_CPU_STALL
, mhdp
->regs
+ CDNS_APB_CTRL
);
725 memcpy_toio(mhdp
->regs
+ CDNS_MHDP_IMEM
, fw
->data
, fw
->size
);
727 /* Leave debug mode, release stall */
728 writel(0, mhdp
->regs
+ CDNS_APB_CTRL
);
731 * Wait for the KEEP_ALIVE "message" on the first 8 bits.
732 * Updated each sched "tick" (~2ms)
734 ret
= readl_poll_timeout(mhdp
->regs
+ CDNS_KEEP_ALIVE
, reg
,
735 reg
& CDNS_KEEP_ALIVE_MASK
, 500,
736 CDNS_KEEP_ALIVE_TIMEOUT
);
739 "device didn't give any life sign: reg %d\n", reg
);
743 ret
= cdns_mhdp_check_fw_version(mhdp
);
747 /* Init events to 0 as it's not cleared by FW at boot but on read */
748 readl(mhdp
->regs
+ CDNS_SW_EVENT0
);
749 readl(mhdp
->regs
+ CDNS_SW_EVENT1
);
750 readl(mhdp
->regs
+ CDNS_SW_EVENT2
);
751 readl(mhdp
->regs
+ CDNS_SW_EVENT3
);
754 ret
= cdns_mhdp_set_firmware_active(mhdp
, true);
758 spin_lock(&mhdp
->start_lock
);
760 mhdp
->hw_state
= MHDP_HW_READY
;
763 * Here we must keep the lock while enabling the interrupts
764 * since it would otherwise be possible that interrupt enable
765 * code is executed after the bridge is detached. The similar
766 * situation is not possible in attach()/detach() callbacks
767 * since the hw_state changes from MHDP_HW_READY to
768 * MHDP_HW_STOPPED happens only due to driver removal when
769 * bridge should already be detached.
771 cdns_mhdp_bridge_hpd_enable(&mhdp
->bridge
);
773 spin_unlock(&mhdp
->start_lock
);
775 wake_up(&mhdp
->fw_load_wq
);
776 dev_dbg(mhdp
->dev
, "DP FW activated\n");
781 static void cdns_mhdp_fw_cb(const struct firmware
*fw
, void *context
)
783 struct cdns_mhdp_device
*mhdp
= context
;
784 bool bridge_attached
;
787 dev_dbg(mhdp
->dev
, "firmware callback\n");
789 if (!fw
|| !fw
->data
) {
790 dev_err(mhdp
->dev
, "%s: No firmware.\n", __func__
);
794 ret
= cdns_mhdp_fw_activate(fw
, mhdp
);
796 release_firmware(fw
);
802 * XXX how to make sure the bridge is still attached when
803 * calling drm_kms_helper_hotplug_event() after releasing
804 * the lock? We should not hold the spin lock when
805 * calling drm_kms_helper_hotplug_event() since it may
806 * cause a dead lock. FB-dev console calls detect from the
807 * same thread just down the call stack started here.
809 spin_lock(&mhdp
->start_lock
);
810 bridge_attached
= mhdp
->bridge_attached
;
811 spin_unlock(&mhdp
->start_lock
);
812 if (bridge_attached
) {
813 if (mhdp
->connector
.dev
)
814 drm_kms_helper_hotplug_event(mhdp
->bridge
.dev
);
816 drm_bridge_hpd_notify(&mhdp
->bridge
, cdns_mhdp_detect(mhdp
));
820 static int cdns_mhdp_load_firmware(struct cdns_mhdp_device
*mhdp
)
824 ret
= request_firmware_nowait(THIS_MODULE
, true, FW_NAME
, mhdp
->dev
,
825 GFP_KERNEL
, mhdp
, cdns_mhdp_fw_cb
);
827 dev_err(mhdp
->dev
, "failed to load firmware (%s), ret: %d\n",
835 static ssize_t
cdns_mhdp_transfer(struct drm_dp_aux
*aux
,
836 struct drm_dp_aux_msg
*msg
)
838 struct cdns_mhdp_device
*mhdp
= dev_get_drvdata(aux
->dev
);
841 if (msg
->request
!= DP_AUX_NATIVE_WRITE
&&
842 msg
->request
!= DP_AUX_NATIVE_READ
)
845 if (msg
->request
== DP_AUX_NATIVE_WRITE
) {
846 const u8
*buf
= msg
->buffer
;
849 for (i
= 0; i
< msg
->size
; ++i
) {
850 ret
= cdns_mhdp_dpcd_write(mhdp
,
851 msg
->address
+ i
, buf
[i
]);
856 "Failed to write DPCD addr %u\n",
862 ret
= cdns_mhdp_dpcd_read(mhdp
, msg
->address
,
863 msg
->buffer
, msg
->size
);
866 "Failed to read DPCD addr %u\n",
876 static int cdns_mhdp_link_training_init(struct cdns_mhdp_device
*mhdp
)
878 union phy_configure_opts phy_cfg
;
882 drm_dp_dpcd_writeb(&mhdp
->aux
, DP_TRAINING_PATTERN_SET
,
883 DP_TRAINING_PATTERN_DISABLE
);
885 /* Reset PHY configuration */
886 reg32
= CDNS_PHY_COMMON_CONFIG
| CDNS_PHY_TRAINING_TYPE(1);
887 if (!mhdp
->host
.scrambler
)
888 reg32
|= CDNS_PHY_SCRAMBLER_BYPASS
;
890 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_PHY_CONFIG
, reg32
);
892 cdns_mhdp_reg_write(mhdp
, CDNS_DP_ENHNCD
,
893 mhdp
->sink
.enhanced
& mhdp
->host
.enhanced
);
895 cdns_mhdp_reg_write(mhdp
, CDNS_DP_LANE_EN
,
896 CDNS_DP_LANE_EN_LANES(mhdp
->link
.num_lanes
));
898 cdns_mhdp_link_configure(&mhdp
->aux
, &mhdp
->link
);
899 phy_cfg
.dp
.link_rate
= mhdp
->link
.rate
/ 100;
900 phy_cfg
.dp
.lanes
= mhdp
->link
.num_lanes
;
902 memset(phy_cfg
.dp
.voltage
, 0, sizeof(phy_cfg
.dp
.voltage
));
903 memset(phy_cfg
.dp
.pre
, 0, sizeof(phy_cfg
.dp
.pre
));
905 phy_cfg
.dp
.ssc
= cdns_mhdp_get_ssc_supported(mhdp
);
906 phy_cfg
.dp
.set_lanes
= true;
907 phy_cfg
.dp
.set_rate
= true;
908 phy_cfg
.dp
.set_voltages
= true;
909 ret
= phy_configure(mhdp
->phy
, &phy_cfg
);
911 dev_err(mhdp
->dev
, "%s: phy_configure() failed: %d\n",
916 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_PHY_CONFIG
,
917 CDNS_PHY_COMMON_CONFIG
|
918 CDNS_PHY_TRAINING_EN
|
919 CDNS_PHY_TRAINING_TYPE(1) |
920 CDNS_PHY_SCRAMBLER_BYPASS
);
922 drm_dp_dpcd_writeb(&mhdp
->aux
, DP_TRAINING_PATTERN_SET
,
923 DP_TRAINING_PATTERN_1
| DP_LINK_SCRAMBLING_DISABLE
);
928 static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device
*mhdp
,
929 u8 link_status
[DP_LINK_STATUS_SIZE
],
930 u8 lanes_data
[CDNS_DP_MAX_NUM_LANES
],
931 union phy_configure_opts
*phy_cfg
)
933 u8 adjust
, max_pre_emph
, max_volt_swing
;
934 u8 set_volt
, set_pre
;
937 max_pre_emph
= CDNS_PRE_EMPHASIS(mhdp
->host
.pre_emphasis
)
938 << DP_TRAIN_PRE_EMPHASIS_SHIFT
;
939 max_volt_swing
= CDNS_VOLT_SWING(mhdp
->host
.volt_swing
);
941 for (i
= 0; i
< mhdp
->link
.num_lanes
; i
++) {
942 /* Check if Voltage swing and pre-emphasis are within limits */
943 adjust
= drm_dp_get_adjust_request_voltage(link_status
, i
);
944 set_volt
= min(adjust
, max_volt_swing
);
946 adjust
= drm_dp_get_adjust_request_pre_emphasis(link_status
, i
);
947 set_pre
= min(adjust
, max_pre_emph
)
948 >> DP_TRAIN_PRE_EMPHASIS_SHIFT
;
951 * Voltage swing level and pre-emphasis level combination is
952 * not allowed: leaving pre-emphasis as-is, and adjusting
955 if (set_volt
+ set_pre
> 3)
956 set_volt
= 3 - set_pre
;
958 phy_cfg
->dp
.voltage
[i
] = set_volt
;
959 lanes_data
[i
] = set_volt
;
961 if (set_volt
== max_volt_swing
)
962 lanes_data
[i
] |= DP_TRAIN_MAX_SWING_REACHED
;
964 phy_cfg
->dp
.pre
[i
] = set_pre
;
965 lanes_data
[i
] |= (set_pre
<< DP_TRAIN_PRE_EMPHASIS_SHIFT
);
967 if (set_pre
== (max_pre_emph
>> DP_TRAIN_PRE_EMPHASIS_SHIFT
))
968 lanes_data
[i
] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED
;
973 void cdns_mhdp_set_adjust_request_voltage(u8 link_status
[DP_LINK_STATUS_SIZE
],
974 unsigned int lane
, u8 volt
)
976 unsigned int s
= ((lane
& 1) ?
977 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT
:
978 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT
);
979 unsigned int idx
= DP_ADJUST_REQUEST_LANE0_1
- DP_LANE0_1_STATUS
+ (lane
>> 1);
981 link_status
[idx
] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK
<< s
);
982 link_status
[idx
] |= volt
<< s
;
986 void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status
[DP_LINK_STATUS_SIZE
],
987 unsigned int lane
, u8 pre_emphasis
)
989 unsigned int s
= ((lane
& 1) ?
990 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT
:
991 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT
);
992 unsigned int idx
= DP_ADJUST_REQUEST_LANE0_1
- DP_LANE0_1_STATUS
+ (lane
>> 1);
994 link_status
[idx
] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK
<< s
);
995 link_status
[idx
] |= pre_emphasis
<< s
;
998 static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device
*mhdp
,
999 u8 link_status
[DP_LINK_STATUS_SIZE
])
1001 u8 max_pre
= CDNS_PRE_EMPHASIS(mhdp
->host
.pre_emphasis
);
1002 u8 max_volt
= CDNS_VOLT_SWING(mhdp
->host
.volt_swing
);
1006 for (i
= 0; i
< mhdp
->link
.num_lanes
; i
++) {
1007 volt
= drm_dp_get_adjust_request_voltage(link_status
, i
);
1008 pre
= drm_dp_get_adjust_request_pre_emphasis(link_status
, i
);
1010 cdns_mhdp_set_adjust_request_voltage(link_status
, i
,
1012 if (mhdp
->host
.volt_swing
& CDNS_FORCE_VOLT_SWING
)
1013 cdns_mhdp_set_adjust_request_voltage(link_status
, i
,
1015 if (mhdp
->host
.pre_emphasis
& CDNS_FORCE_PRE_EMPHASIS
)
1016 cdns_mhdp_set_adjust_request_pre_emphasis(link_status
,
1021 static void cdns_mhdp_print_lt_status(const char *prefix
,
1022 struct cdns_mhdp_device
*mhdp
,
1023 union phy_configure_opts
*phy_cfg
)
1025 char vs
[8] = "0/0/0/0";
1026 char pe
[8] = "0/0/0/0";
1029 for (i
= 0; i
< mhdp
->link
.num_lanes
; i
++) {
1030 vs
[i
* 2] = '0' + phy_cfg
->dp
.voltage
[i
];
1031 pe
[i
* 2] = '0' + phy_cfg
->dp
.pre
[i
];
1034 vs
[i
* 2 - 1] = '\0';
1035 pe
[i
* 2 - 1] = '\0';
1037 dev_dbg(mhdp
->dev
, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
1039 mhdp
->link
.num_lanes
, mhdp
->link
.rate
/ 100,
1043 static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device
*mhdp
,
1045 unsigned int training_interval
)
1047 u8 lanes_data
[CDNS_DP_MAX_NUM_LANES
], fail_counter_short
= 0;
1048 u8 link_status
[DP_LINK_STATUS_SIZE
];
1049 union phy_configure_opts phy_cfg
;
1054 dev_dbg(mhdp
->dev
, "Starting EQ phase\n");
1056 /* Enable link training TPS[eq_tps] in PHY */
1057 reg32
= CDNS_PHY_COMMON_CONFIG
| CDNS_PHY_TRAINING_EN
|
1058 CDNS_PHY_TRAINING_TYPE(eq_tps
);
1060 reg32
|= CDNS_PHY_SCRAMBLER_BYPASS
;
1061 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_PHY_CONFIG
, reg32
);
1063 drm_dp_dpcd_writeb(&mhdp
->aux
, DP_TRAINING_PATTERN_SET
,
1064 (eq_tps
!= 4) ? eq_tps
| DP_LINK_SCRAMBLING_DISABLE
:
1065 CDNS_DP_TRAINING_PATTERN_4
);
1067 drm_dp_dpcd_read_link_status(&mhdp
->aux
, link_status
);
1070 cdns_mhdp_get_adjust_train(mhdp
, link_status
, lanes_data
,
1072 phy_cfg
.dp
.lanes
= mhdp
->link
.num_lanes
;
1073 phy_cfg
.dp
.ssc
= cdns_mhdp_get_ssc_supported(mhdp
);
1074 phy_cfg
.dp
.set_lanes
= false;
1075 phy_cfg
.dp
.set_rate
= false;
1076 phy_cfg
.dp
.set_voltages
= true;
1077 ret
= phy_configure(mhdp
->phy
, &phy_cfg
);
1079 dev_err(mhdp
->dev
, "%s: phy_configure() failed: %d\n",
1084 cdns_mhdp_adjust_lt(mhdp
, mhdp
->link
.num_lanes
,
1085 training_interval
, lanes_data
, link_status
);
1087 r
= drm_dp_clock_recovery_ok(link_status
, mhdp
->link
.num_lanes
);
1091 if (drm_dp_channel_eq_ok(link_status
, mhdp
->link
.num_lanes
)) {
1092 cdns_mhdp_print_lt_status("EQ phase ok", mhdp
,
1097 fail_counter_short
++;
1099 cdns_mhdp_adjust_requested_eq(mhdp
, link_status
);
1100 } while (fail_counter_short
< 5);
1103 cdns_mhdp_print_lt_status("EQ phase failed", mhdp
, &phy_cfg
);
1108 static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device
*mhdp
,
1109 u8 link_status
[DP_LINK_STATUS_SIZE
],
1110 u8
*req_volt
, u8
*req_pre
)
1112 const u8 max_volt
= CDNS_VOLT_SWING(mhdp
->host
.volt_swing
);
1113 const u8 max_pre
= CDNS_PRE_EMPHASIS(mhdp
->host
.pre_emphasis
);
1116 for (i
= 0; i
< mhdp
->link
.num_lanes
; i
++) {
1119 val
= mhdp
->host
.volt_swing
& CDNS_FORCE_VOLT_SWING
?
1120 max_volt
: req_volt
[i
];
1121 cdns_mhdp_set_adjust_request_voltage(link_status
, i
, val
);
1123 val
= mhdp
->host
.pre_emphasis
& CDNS_FORCE_PRE_EMPHASIS
?
1124 max_pre
: req_pre
[i
];
1125 cdns_mhdp_set_adjust_request_pre_emphasis(link_status
, i
, val
);
1130 void cdns_mhdp_validate_cr(struct cdns_mhdp_device
*mhdp
, bool *cr_done
,
1131 bool *same_before_adjust
, bool *max_swing_reached
,
1132 u8 before_cr
[CDNS_DP_MAX_NUM_LANES
],
1133 u8 after_cr
[DP_LINK_STATUS_SIZE
], u8
*req_volt
,
1136 const u8 max_volt
= CDNS_VOLT_SWING(mhdp
->host
.volt_swing
);
1137 const u8 max_pre
= CDNS_PRE_EMPHASIS(mhdp
->host
.pre_emphasis
);
1138 bool same_pre
, same_volt
;
1142 *same_before_adjust
= false;
1143 *max_swing_reached
= false;
1144 *cr_done
= drm_dp_clock_recovery_ok(after_cr
, mhdp
->link
.num_lanes
);
1146 for (i
= 0; i
< mhdp
->link
.num_lanes
; i
++) {
1147 adjust
= drm_dp_get_adjust_request_voltage(after_cr
, i
);
1148 req_volt
[i
] = min(adjust
, max_volt
);
1150 adjust
= drm_dp_get_adjust_request_pre_emphasis(after_cr
, i
) >>
1151 DP_TRAIN_PRE_EMPHASIS_SHIFT
;
1152 req_pre
[i
] = min(adjust
, max_pre
);
1154 same_pre
= (before_cr
[i
] & DP_TRAIN_PRE_EMPHASIS_MASK
) ==
1155 req_pre
[i
] << DP_TRAIN_PRE_EMPHASIS_SHIFT
;
1156 same_volt
= (before_cr
[i
] & DP_TRAIN_VOLTAGE_SWING_MASK
) ==
1158 if (same_pre
&& same_volt
)
1159 *same_before_adjust
= true;
1161 /* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
1162 if (!*cr_done
&& req_volt
[i
] + req_pre
[i
] >= 3) {
1163 *max_swing_reached
= true;
1169 static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device
*mhdp
)
1171 u8 lanes_data
[CDNS_DP_MAX_NUM_LANES
],
1172 fail_counter_short
= 0, fail_counter_cr_long
= 0;
1173 u8 link_status
[DP_LINK_STATUS_SIZE
];
1175 union phy_configure_opts phy_cfg
;
1178 dev_dbg(mhdp
->dev
, "Starting CR phase\n");
1180 ret
= cdns_mhdp_link_training_init(mhdp
);
1184 drm_dp_dpcd_read_link_status(&mhdp
->aux
, link_status
);
1187 u8 requested_adjust_volt_swing
[CDNS_DP_MAX_NUM_LANES
] = {};
1188 u8 requested_adjust_pre_emphasis
[CDNS_DP_MAX_NUM_LANES
] = {};
1189 bool same_before_adjust
, max_swing_reached
;
1191 cdns_mhdp_get_adjust_train(mhdp
, link_status
, lanes_data
,
1193 phy_cfg
.dp
.lanes
= mhdp
->link
.num_lanes
;
1194 phy_cfg
.dp
.ssc
= cdns_mhdp_get_ssc_supported(mhdp
);
1195 phy_cfg
.dp
.set_lanes
= false;
1196 phy_cfg
.dp
.set_rate
= false;
1197 phy_cfg
.dp
.set_voltages
= true;
1198 ret
= phy_configure(mhdp
->phy
, &phy_cfg
);
1200 dev_err(mhdp
->dev
, "%s: phy_configure() failed: %d\n",
1205 cdns_mhdp_adjust_lt(mhdp
, mhdp
->link
.num_lanes
, 100,
1206 lanes_data
, link_status
);
1208 cdns_mhdp_validate_cr(mhdp
, &cr_done
, &same_before_adjust
,
1209 &max_swing_reached
, lanes_data
,
1211 requested_adjust_volt_swing
,
1212 requested_adjust_pre_emphasis
);
1214 if (max_swing_reached
) {
1215 dev_err(mhdp
->dev
, "CR: max swing reached\n");
1220 cdns_mhdp_print_lt_status("CR phase ok", mhdp
,
1225 /* Not all CR_DONE bits set */
1226 fail_counter_cr_long
++;
1228 if (same_before_adjust
) {
1229 fail_counter_short
++;
1233 fail_counter_short
= 0;
1235 * Voltage swing/pre-emphasis adjust requested
1238 cdns_mhdp_adjust_requested_cr(mhdp
, link_status
,
1239 requested_adjust_volt_swing
,
1240 requested_adjust_pre_emphasis
);
1241 } while (fail_counter_short
< 5 && fail_counter_cr_long
< 10);
1244 cdns_mhdp_print_lt_status("CR phase failed", mhdp
, &phy_cfg
);
1249 static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link
*link
)
1251 switch (drm_dp_link_rate_to_bw_code(link
->rate
)) {
1252 case DP_LINK_BW_2_7
:
1253 link
->rate
= drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62
);
1255 case DP_LINK_BW_5_4
:
1256 link
->rate
= drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7
);
1258 case DP_LINK_BW_8_1
:
1259 link
->rate
= drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4
);
1264 static int cdns_mhdp_link_training(struct cdns_mhdp_device
*mhdp
,
1265 unsigned int training_interval
)
1268 const u8 eq_tps
= cdns_mhdp_eq_training_pattern_supported(mhdp
);
1272 if (!cdns_mhdp_link_training_cr(mhdp
)) {
1273 if (drm_dp_link_rate_to_bw_code(mhdp
->link
.rate
) !=
1276 "Reducing link rate during CR phase\n");
1277 cdns_mhdp_lower_link_rate(&mhdp
->link
);
1280 } else if (mhdp
->link
.num_lanes
> 1) {
1282 "Reducing lanes number during CR phase\n");
1283 mhdp
->link
.num_lanes
>>= 1;
1284 mhdp
->link
.rate
= cdns_mhdp_max_link_rate(mhdp
);
1290 "Link training failed during CR phase\n");
1294 if (cdns_mhdp_link_training_channel_eq(mhdp
, eq_tps
,
1298 if (mhdp
->link
.num_lanes
> 1) {
1300 "Reducing lanes number during EQ phase\n");
1301 mhdp
->link
.num_lanes
>>= 1;
1304 } else if (drm_dp_link_rate_to_bw_code(mhdp
->link
.rate
) !=
1307 "Reducing link rate during EQ phase\n");
1308 cdns_mhdp_lower_link_rate(&mhdp
->link
);
1309 mhdp
->link
.num_lanes
= cdns_mhdp_max_num_lanes(mhdp
);
1314 dev_err(mhdp
->dev
, "Link training failed during EQ phase\n");
1318 dev_dbg(mhdp
->dev
, "Link training ok. Lanes: %u, Rate %u Mbps\n",
1319 mhdp
->link
.num_lanes
, mhdp
->link
.rate
/ 100);
1321 drm_dp_dpcd_writeb(&mhdp
->aux
, DP_TRAINING_PATTERN_SET
,
1322 mhdp
->host
.scrambler
? 0 :
1323 DP_LINK_SCRAMBLING_DISABLE
);
1325 ret
= cdns_mhdp_reg_read(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, ®32
);
1328 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1332 reg32
&= ~GENMASK(1, 0);
1333 reg32
|= CDNS_DP_NUM_LANES(mhdp
->link
.num_lanes
);
1334 reg32
|= CDNS_DP_WR_FAILING_EDGE_VSYNC
;
1335 reg32
|= CDNS_DP_FRAMER_EN
;
1336 cdns_mhdp_reg_write(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, reg32
);
1338 /* Reset PHY config */
1339 reg32
= CDNS_PHY_COMMON_CONFIG
| CDNS_PHY_TRAINING_TYPE(1);
1340 if (!mhdp
->host
.scrambler
)
1341 reg32
|= CDNS_PHY_SCRAMBLER_BYPASS
;
1342 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_PHY_CONFIG
, reg32
);
1346 /* Reset PHY config */
1347 reg32
= CDNS_PHY_COMMON_CONFIG
| CDNS_PHY_TRAINING_TYPE(1);
1348 if (!mhdp
->host
.scrambler
)
1349 reg32
|= CDNS_PHY_SCRAMBLER_BYPASS
;
1350 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_PHY_CONFIG
, reg32
);
1352 drm_dp_dpcd_writeb(&mhdp
->aux
, DP_TRAINING_PATTERN_SET
,
1353 DP_TRAINING_PATTERN_DISABLE
);
1358 static u32
cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device
*mhdp
,
1364 return 4000 << (interval
- 1);
1366 "wrong training interval returned by DPCD: %d\n", interval
);
1370 static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device
*mhdp
)
1372 unsigned int link_rate
;
1374 /* Get source capabilities based on PHY attributes */
1376 mhdp
->host
.lanes_cnt
= mhdp
->phy
->attrs
.bus_width
;
1377 if (!mhdp
->host
.lanes_cnt
)
1378 mhdp
->host
.lanes_cnt
= 4;
1380 link_rate
= mhdp
->phy
->attrs
.max_link_rate
;
1382 link_rate
= drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1
);
1384 /* PHY uses Mb/s, DRM uses tens of kb/s. */
1387 mhdp
->host
.link_rate
= link_rate
;
1388 mhdp
->host
.volt_swing
= CDNS_VOLT_SWING(3);
1389 mhdp
->host
.pre_emphasis
= CDNS_PRE_EMPHASIS(3);
1390 mhdp
->host
.pattern_supp
= CDNS_SUPPORT_TPS(1) |
1391 CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
1392 CDNS_SUPPORT_TPS(4);
1393 mhdp
->host
.lane_mapping
= CDNS_LANE_MAPPING_NORMAL
;
1394 mhdp
->host
.fast_link
= false;
1395 mhdp
->host
.enhanced
= true;
1396 mhdp
->host
.scrambler
= true;
1397 mhdp
->host
.ssc
= false;
/*
 * Populate the sink capability structure from the negotiated link
 * parameters and the raw DPCD receiver-capability bytes.
 */
static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
				     u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	mhdp->sink.link_rate = mhdp->link.rate;
	mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
	mhdp->sink.enhanced = !!(mhdp->link.capabilities &
				 DP_LINK_CAP_ENHANCED_FRAMING);

	/* Set SSC support */
	mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
			    DP_MAX_DOWNSPREAD_0_5);

	/* Set TPS support; TPS1/2 are mandatory, TPS3/4 advertised in DPCD */
	mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
	if (drm_dp_tps3_supported(dpcd))
		mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
	if (drm_dp_tps4_supported(dpcd))
		mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);

	/* Set fast link support */
	mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
				  DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
}
1424 static int cdns_mhdp_link_up(struct cdns_mhdp_device
*mhdp
)
1426 u8 dpcd
[DP_RECEIVER_CAP_SIZE
], amp
[2];
1427 u32 resp
, interval
, interval_us
;
1432 WARN_ON(!mutex_is_locked(&mhdp
->link_mutex
));
1434 drm_dp_dpcd_readb(&mhdp
->aux
, DP_TRAINING_AUX_RD_INTERVAL
,
1437 if (ext_cap_chk
& DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT
)
1438 addr
= DP_DP13_DPCD_REV
;
1442 err
= drm_dp_dpcd_read(&mhdp
->aux
, addr
, dpcd
, DP_RECEIVER_CAP_SIZE
);
1444 dev_err(mhdp
->dev
, "Failed to read receiver capabilities\n");
1448 mhdp
->link
.revision
= dpcd
[0];
1449 mhdp
->link
.rate
= drm_dp_bw_code_to_link_rate(dpcd
[1]);
1450 mhdp
->link
.num_lanes
= dpcd
[2] & DP_MAX_LANE_COUNT_MASK
;
1452 if (dpcd
[2] & DP_ENHANCED_FRAME_CAP
)
1453 mhdp
->link
.capabilities
|= DP_LINK_CAP_ENHANCED_FRAMING
;
1455 dev_dbg(mhdp
->dev
, "Set sink device power state via DPCD\n");
1456 cdns_mhdp_link_power_up(&mhdp
->aux
, &mhdp
->link
);
1458 cdns_mhdp_fill_sink_caps(mhdp
, dpcd
);
1460 mhdp
->link
.rate
= cdns_mhdp_max_link_rate(mhdp
);
1461 mhdp
->link
.num_lanes
= cdns_mhdp_max_num_lanes(mhdp
);
1463 /* Disable framer for link training */
1464 err
= cdns_mhdp_reg_read(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, &resp
);
1467 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1472 resp
&= ~CDNS_DP_FRAMER_EN
;
1473 cdns_mhdp_reg_write(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, resp
);
1475 /* Spread AMP if required, enable 8b/10b coding */
1476 amp
[0] = cdns_mhdp_get_ssc_supported(mhdp
) ? DP_SPREAD_AMP_0_5
: 0;
1477 amp
[1] = DP_SET_ANSI_8B10B
;
1478 drm_dp_dpcd_write(&mhdp
->aux
, DP_DOWNSPREAD_CTRL
, amp
, 2);
1480 if (mhdp
->host
.fast_link
& mhdp
->sink
.fast_link
) {
1481 dev_err(mhdp
->dev
, "fastlink not supported\n");
1485 interval
= dpcd
[DP_TRAINING_AUX_RD_INTERVAL
] & DP_TRAINING_AUX_RD_MASK
;
1486 interval_us
= cdns_mhdp_get_training_interval_us(mhdp
, interval
);
1488 cdns_mhdp_link_training(mhdp
, interval_us
)) {
1489 dev_err(mhdp
->dev
, "Link training failed. Exiting.\n");
1493 mhdp
->link_up
= true;
/*
 * Take the DP link down: power down the sink (only if still plugged)
 * and mark the link as down. Caller must hold mhdp->link_mutex.
 */
static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
{
	WARN_ON(!mutex_is_locked(&mhdp->link_mutex));

	if (mhdp->plugged)
		cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);

	mhdp->link_up = false;
}
/*
 * Read the sink's EDID over AUX using the mailbox-based block reader.
 * Returns NULL when no sink is connected or the read fails.
 */
static const struct drm_edid *cdns_mhdp_edid_read(struct cdns_mhdp_device *mhdp,
						  struct drm_connector *connector)
{
	if (!mhdp->plugged)
		return NULL;

	return drm_edid_read_custom(connector, cdns_mhdp_get_edid_block, mhdp);
}
/*
 * drm_connector_helper_funcs.get_modes: read the EDID and populate the
 * connector's mode list. Returns the number of modes added (0 on error).
 */
static int cdns_mhdp_get_modes(struct drm_connector *connector)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
	const struct drm_edid *drm_edid;
	int num_modes;

	if (!mhdp->plugged)
		return 0;

	drm_edid = cdns_mhdp_edid_read(mhdp, connector);

	drm_edid_connector_update(connector, drm_edid);

	if (!drm_edid) {
		dev_err(mhdp->dev, "Failed to read EDID\n");
		return 0;
	}

	num_modes = drm_edid_connector_add_modes(connector);
	drm_edid_free(drm_edid);

	/*
	 * HACK: Warn about unsupported display formats until we deal
	 * with them correctly.
	 */
	if (connector->display_info.color_formats &&
	    !(connector->display_info.color_formats &
	      mhdp->display_fmt.color_format))
		dev_warn(mhdp->dev,
			 "%s: No supported color_format found (0x%08x)\n",
			 __func__, connector->display_info.color_formats);

	if (connector->display_info.bpc &&
	    connector->display_info.bpc < mhdp->display_fmt.bpc)
		dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
			 __func__, connector->display_info.bpc,
			 mhdp->display_fmt.bpc);

	return num_modes;
}
1558 static int cdns_mhdp_connector_detect(struct drm_connector
*conn
,
1559 struct drm_modeset_acquire_ctx
*ctx
,
1562 struct cdns_mhdp_device
*mhdp
= connector_to_mhdp(conn
);
1564 return cdns_mhdp_detect(mhdp
);
/*
 * Compute bits-per-pixel for the given display format.
 * Y-only streams carry one component; 4:4:4 carries three full
 * components, 4:2:2 two, and 4:2:0 effectively 1.5 per pixel.
 */
static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
{
	u32 bpp;

	if (fmt->y_only)
		return fmt->bpc;

	switch (fmt->color_format) {
	case DRM_COLOR_FORMAT_RGB444:
	case DRM_COLOR_FORMAT_YCBCR444:
		bpp = fmt->bpc * 3;
		break;
	case DRM_COLOR_FORMAT_YCBCR422:
		bpp = fmt->bpc * 2;
		break;
	case DRM_COLOR_FORMAT_YCBCR420:
		bpp = fmt->bpc * 3 / 2;
		break;
	default:
		bpp = fmt->bpc * 3;
		WARN_ON(1);
	}
	return bpp;
}
/*
 * Check whether @mode fits in the bandwidth offered by @lanes lanes at
 * @rate (tens of kb/s per lane). Returns true when the mode fits.
 */
bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
			    const struct drm_display_mode *mode,
			    unsigned int lanes, unsigned int rate)
{
	u32 max_bw, req_bw, bpp;

	/*
	 * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
	 * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the
	 * value thus equals the bandwidth in 10kb/s units, which matches the
	 * units of the rate parameter.
	 */

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
	req_bw = mode->clock * bpp / 8;
	max_bw = lanes * rate;
	if (req_bw > max_bw) {
		dev_dbg(mhdp->dev,
			"Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
			mode->name, req_bw, max_bw);

		return false;
	}

	return true;
}
/*
 * drm_connector_helper_funcs.mode_valid: reject modes exceeding the
 * bandwidth of the currently negotiated link. Takes link_mutex because
 * the link parameters may change concurrently via the HPD path.
 */
static
enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
					  struct drm_display_mode *mode)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);

	mutex_lock(&mhdp->link_mutex);

	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
				    mhdp->link.rate)) {
		mutex_unlock(&mhdp->link_mutex);
		return MODE_CLOCK_HIGH;
	}

	mutex_unlock(&mhdp->link_mutex);
	return MODE_OK;
}
/*
 * drm_connector_helper_funcs.atomic_check: handle HDCP content-protection
 * state transitions. Forces a full modeset (mode_changed) whenever the
 * requested protection state actually changes.
 */
static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
					    struct drm_atomic_state *state)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
	struct drm_connector_state *old_state, *new_state;
	struct drm_crtc_state *crtc_state;
	u64 old_cp, new_cp;

	if (!mhdp->hdcp_supported)
		return 0;

	old_state = drm_atomic_get_old_connector_state(state, conn);
	new_state = drm_atomic_get_new_connector_state(state, conn);
	old_cp = old_state->content_protection;
	new_cp = new_state->content_protection;

	/* Content-type change requires re-enabling protection */
	if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		goto mode_changed;
	}

	/* CRTC going away: ENABLED downgrades to DESIRED (never user-set) */
	if (!new_state->crtc) {
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return 0;
	}

	/* No effective change (DESIRED->ENABLED is driver-driven, not a modeset) */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
		return 0;

mode_changed:
	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
	crtc_state->mode_changed = true;

	return 0;
}
/* Connector helper callbacks (probe/detect/validation) for this bridge. */
static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
	.detect_ctx = cdns_mhdp_connector_detect,
	.get_modes = cdns_mhdp_get_modes,
	.mode_valid = cdns_mhdp_mode_valid,
	.atomic_check = cdns_mhdp_connector_atomic_check,
};
/* Core connector operations; all delegated to standard DRM helpers. */
static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.reset = drm_atomic_helper_connector_reset,
	.destroy = drm_connector_cleanup,
};
/*
 * Create and register the DisplayPort connector for this bridge, wire
 * it to the encoder, and (if HDCP is supported) attach the content
 * protection property. Returns 0 on success or a negative error code.
 */
static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
{
	u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
	struct drm_connector *conn = &mhdp->connector;
	struct drm_bridge *bridge = &mhdp->bridge;
	int ret;

	conn->polled = DRM_CONNECTOR_POLL_HPD;

	ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
		return ret;
	}

	drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);

	ret = drm_display_info_set_bus_formats(&conn->display_info,
					       &bus_format, 1);
	if (ret)
		return ret;

	ret = drm_connector_attach_encoder(conn, bridge->encoder);
	if (ret) {
		dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
		return ret;
	}

	if (mhdp->hdcp_supported)
		ret = drm_connector_attach_content_protection_property(conn, true);

	return ret;
}
/*
 * drm_bridge_funcs.attach: register the AUX channel, create the
 * connector (unless the DRM core manages it), and enable SW event
 * interrupts once firmware is ready.
 */
static int cdns_mhdp_attach(struct drm_bridge *bridge,
			    enum drm_bridge_attach_flags flags)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	bool hw_ready;
	int ret;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	mhdp->aux.drm_dev = bridge->dev;
	ret = drm_dp_aux_register(&mhdp->aux);
	if (ret < 0)
		return ret;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		ret = cdns_mhdp_connector_init(mhdp);
		if (ret)
			goto aux_unregister;
	}

	/* start_lock guards bridge_attached/hw_state against the fw loader */
	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = true;
	hw_ready = mhdp->hw_state == MHDP_HW_READY;

	spin_unlock(&mhdp->start_lock);

	/* Enable SW event interrupts */
	if (hw_ready)
		cdns_mhdp_bridge_hpd_enable(bridge);

	return 0;

aux_unregister:
	drm_dp_aux_unregister(&mhdp->aux);
	return ret;
}
/*
 * Program the framer and MSA (Main Stream Attribute) registers for the
 * given mode on the current stream, then enable the framer. The register
 * write ordering follows the hardware programming sequence and must not
 * be reordered.
 */
static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
				      const struct drm_display_mode *mode)
{
	unsigned int dp_framer_sp = 0, msa_horizontal_1,
		msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
		misc0 = 0, misc1 = 0, pxl_repr,
		front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
		dp_vertical_1;
	u8 stream_id = mhdp->stream_id;
	u32 bpp, bpc, pxlfmt, framer;
	int ret;

	pxlfmt = mhdp->display_fmt.color_format;
	bpc = mhdp->display_fmt.bpc;

	/*
	 * If YCBCR supported and stream not SD, use ITU709
	 * Need to handle ITU version with YCBCR420 when supported
	 */
	if ((pxlfmt == DRM_COLOR_FORMAT_YCBCR444 ||
	     pxlfmt == DRM_COLOR_FORMAT_YCBCR422) && mode->crtc_vdisplay >= 720)
		misc0 = DP_YCBCR_COEFFICIENTS_ITU709;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	/* Map the color format into framer pixel representation + MSA MISC0 */
	switch (pxlfmt) {
	case DRM_COLOR_FORMAT_RGB444:
		pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_RGB;
		break;
	case DRM_COLOR_FORMAT_YCBCR444:
		pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCBCR422:
		pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCBCR420:
		pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
		break;
	default:
		pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
	}

	/* Encode bits-per-component into MISC0 and the pixel representation */
	switch (bpc) {
	case 6:
		misc0 |= DP_TEST_BIT_DEPTH_6;
		pxl_repr |= CDNS_DP_FRAMER_6_BPC;
		break;
	case 8:
		misc0 |= DP_TEST_BIT_DEPTH_8;
		pxl_repr |= CDNS_DP_FRAMER_8_BPC;
		break;
	case 10:
		misc0 |= DP_TEST_BIT_DEPTH_10;
		pxl_repr |= CDNS_DP_FRAMER_10_BPC;
		break;
	case 12:
		misc0 |= DP_TEST_BIT_DEPTH_12;
		pxl_repr |= CDNS_DP_FRAMER_12_BPC;
		break;
	case 16:
		misc0 |= DP_TEST_BIT_DEPTH_16;
		pxl_repr |= CDNS_DP_FRAMER_16_BPC;
		break;
	}

	bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;

	cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
			    bnd_hsync2vsync);

	/* Sync polarity handling for the hsync-to-vsync bridge block */
	hsync2vsync_pol_ctrl = 0;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
			    hsync2vsync_pol_ctrl);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);

	front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
	back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
			    CDNS_DP_FRONT_PORCH(front_porch) |
			    CDNS_DP_BACK_PORCH(back_porch));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
			    mode->crtc_hdisplay * bpp / 8);

	/* MSA horizontal: total width and hsync start (from end of line) */
	msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
			    CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
			    CDNS_DP_MSAH0_HSYNC_START(msa_h0));

	hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
	msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
			   CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
			    msa_horizontal_1);

	/* MSA vertical: total height and vsync start (from end of frame) */
	msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
			    CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
			    CDNS_DP_MSAV0_VSYNC_START(msa_v0));

	vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
	msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
			 CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
			    msa_vertical_1);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		misc1 = DP_TEST_INTERLACED;
	if (mhdp->display_fmt.y_only)
		misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
	/* Use VSC SDP for Y420 */
	if (pxlfmt == DRM_COLOR_FORMAT_YCBCR420)
		misc1 = CDNS_DP_TEST_VSC_SDP;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
			    misc0 | (misc1 << 8));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
			    CDNS_DP_H_HSYNC_WIDTH(hsync) |
			    CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
			    CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
			    CDNS_DP_V0_VSTART(msa_v0));

	dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);

	cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
				(mode->flags & DRM_MODE_FLAG_INTERLACE) ?
				CDNS_DP_VB_ID_INTERLACED : 0);

	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return;
	}
	/* Enable the framer and switch the stream out of no-video mode */
	framer |= CDNS_DP_FRAMER_EN;
	framer &= ~CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
}
/*
 * Enable single-stream transport for @mode: compute the transfer-unit
 * valid-symbol count (VS) and line thresholds from the pixel clock and
 * link bandwidth, program the framer TU registers, then configure the
 * video path.
 */
static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
				 const struct drm_display_mode *mode)
{
	u32 rate, vs, required_bandwidth, available_bandwidth;
	s32 line_thresh1, line_thresh2, line_thresh = 0;
	int pxlclock = mode->crtc_clock;
	u32 tu_size = 64;
	u32 bpp;

	/* Get rate in MSymbols per second per lane */
	rate = mhdp->link.rate / 1000;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	required_bandwidth = pxlclock * bpp / 8;
	available_bandwidth = mhdp->link.num_lanes * rate;

	/* Valid symbols per TU, scaled to avoid fractional arithmetic */
	vs = tu_size * required_bandwidth / available_bandwidth;
	vs /= 1000;

	if (vs == tu_size)
		vs = tu_size - 1;

	/* Line thresholds in units of 1/32 symbol (<<5 fixed point) */
	line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
	line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
	line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
	line_thresh = (line_thresh >> 5) + 2;

	mhdp->stream_id = 0;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
			    CDNS_DP_FRAMER_TU_VS(vs) |
			    CDNS_DP_FRAMER_TU_SIZE(tu_size) |
			    CDNS_DP_FRAMER_TU_CNT_RST_EN);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
			    line_thresh & GENMASK(5, 0));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
			    CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
						   0 : tu_size - vs));

	cdns_mhdp_configure_video(mhdp, mode);
}
/*
 * drm_bridge_funcs.atomic_enable: bring the link up if needed, enable
 * the VIF clock, optionally kick off HDCP, validate bandwidth for the
 * adjusted mode and start single-stream transport. On failure the
 * modeset-retry worker is scheduled so userspace can reprobe.
 *
 * link_mutex is dropped around cdns_mhdp_hdcp_enable() intentionally —
 * the HDCP path takes its own locks; do not "simplify" this.
 */
static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
				    struct drm_bridge_state *bridge_state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	struct drm_atomic_state *state = bridge_state->base.state;
	struct cdns_mhdp_bridge_state *mhdp_state;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_bridge_state *new_state;
	const struct drm_display_mode *mode;
	u32 resp;
	int ret;

	dev_dbg(mhdp->dev, "bridge enable\n");

	mutex_lock(&mhdp->link_mutex);

	if (mhdp->plugged && !mhdp->link_up) {
		ret = cdns_mhdp_link_up(mhdp);
		if (ret < 0)
			goto out;
	}

	/* Platform-specific enable hook (e.g. J721E wrapper) */
	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
		mhdp->info->ops->enable(mhdp);

	/* Enable VIF clock for stream 0 */
	ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
	if (ret < 0) {
		dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
		goto out;
	}

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
			    resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);

	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	if (WARN_ON(!connector))
		goto out;

	conn_state = drm_atomic_get_new_connector_state(state, connector);
	if (WARN_ON(!conn_state))
		goto out;

	if (mhdp->hdcp_supported &&
	    mhdp->hw_state == MHDP_HW_READY &&
	    conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_unlock(&mhdp->link_mutex);
		cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type);
		mutex_lock(&mhdp->link_mutex);
	}

	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
	if (WARN_ON(!crtc_state))
		goto out;

	mode = &crtc_state->adjusted_mode;

	new_state = drm_atomic_get_new_bridge_state(state, bridge);
	if (WARN_ON(!new_state))
		goto out;

	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
				    mhdp->link.rate)) {
		ret = -EINVAL;
		goto out;
	}

	cdns_mhdp_sst_enable(mhdp, mode);

	mhdp_state = to_cdns_mhdp_bridge_state(new_state);

	/* Keep a copy of the mode for the HPD re-train path */
	mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
	if (!mhdp_state->current_mode)
		goto out;

	drm_mode_set_name(mhdp_state->current_mode);

	dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);

	mhdp->bridge_enabled = true;

out:
	mutex_unlock(&mhdp->link_mutex);
	if (ret < 0)
		schedule_work(&mhdp->modeset_retry_work);
}
/*
 * drm_bridge_funcs.atomic_disable: tear down HDCP, stop the framer,
 * bring the link down and gate the stream-0 VIF clock, then run the
 * platform-specific disable hook.
 */
static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
				     struct drm_bridge_state *bridge_state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	u32 resp;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	mutex_lock(&mhdp->link_mutex);

	if (mhdp->hdcp_supported)
		cdns_mhdp_hdcp_disable(mhdp);

	mhdp->bridge_enabled = false;
	/* Disable the framer and force no-video mode */
	cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
	resp &= ~CDNS_DP_FRAMER_EN;
	resp |= CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);

	cdns_mhdp_link_down(mhdp);

	/* Disable VIF clock for stream 0 */
	cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
			    resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
		mhdp->info->ops->disable(mhdp);

	mutex_unlock(&mhdp->link_mutex);
}
/*
 * drm_bridge_funcs.detach: unregister the AUX channel, mark the bridge
 * detached (under start_lock, shared with the IRQ path) and mask all
 * APB interrupts.
 */
static void cdns_mhdp_detach(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	dev_dbg(mhdp->dev, "%s\n", __func__);

	drm_dp_aux_unregister(&mhdp->aux);

	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = false;

	spin_unlock(&mhdp->start_lock);

	/* Mask every APB interrupt source */
	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
}
/*
 * drm_bridge_funcs.atomic_duplicate_state for the subclassed bridge
 * state. Note: current_mode is deliberately NOT copied — the new state
 * starts with a zeroed subclass portion.
 */
static struct drm_bridge_state *
cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
{
	struct cdns_mhdp_bridge_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);

	return &state->base;
}
/*
 * drm_bridge_funcs.atomic_destroy_state: free the duplicated mode held
 * in the subclassed state before freeing the state itself.
 */
static void
cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
				      struct drm_bridge_state *state)
{
	struct cdns_mhdp_bridge_state *cdns_mhdp_state;

	cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);

	if (cdns_mhdp_state->current_mode) {
		drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
		cdns_mhdp_state->current_mode = NULL;
	}

	kfree(cdns_mhdp_state);
}
/*
 * drm_bridge_funcs.atomic_reset: allocate a fresh zeroed subclassed
 * bridge state and initialize its base with the DRM helper.
 */
static struct drm_bridge_state *
cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
{
	struct cdns_mhdp_bridge_state *cdns_mhdp_state;

	cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
	if (!cdns_mhdp_state)
		return NULL;

	__drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);

	return &cdns_mhdp_state->base;
}
/*
 * drm_bridge_funcs.atomic_get_input_bus_fmts: report the single input
 * bus format this bridge accepts (RGB 12-bit-per-component). Returns a
 * kmalloc'd array the caller frees, or NULL on allocation failure.
 */
static u32 *cdns_mhdp_get_input_bus_fmts(struct drm_bridge *bridge,
					 struct drm_bridge_state *bridge_state,
					 struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state,
					 u32 output_fmt,
					 unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	*num_input_fmts = 0;

	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	*num_input_fmts = 1;
	input_fmts[0] = MEDIA_BUS_FMT_RGB121212_1X36;

	return input_fmts;
}
/*
 * drm_bridge_funcs.atomic_check: verify the adjusted mode fits the
 * current link bandwidth and set the static input bus flags.
 */
static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
				  struct drm_bridge_state *bridge_state,
				  struct drm_crtc_state *crtc_state,
				  struct drm_connector_state *conn_state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;

	mutex_lock(&mhdp->link_mutex);

	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
				    mhdp->link.rate)) {
		dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
			__func__, mode->name, mhdp->link.num_lanes,
			mhdp->link.rate / 100);
		mutex_unlock(&mhdp->link_mutex);
		return -EINVAL;
	}

	/*
	 * There might be flags negotiation supported in future.
	 * Set the bus flags in atomic_check statically for now.
	 */
	if (mhdp->info)
		bridge_state->input_bus_cfg.flags = *mhdp->info->input_bus_flags;

	mutex_unlock(&mhdp->link_mutex);
	return 0;
}
2216 static enum drm_connector_status
cdns_mhdp_bridge_detect(struct drm_bridge
*bridge
)
2218 struct cdns_mhdp_device
*mhdp
= bridge_to_mhdp(bridge
);
2220 return cdns_mhdp_detect(mhdp
);
/*
 * drm_bridge_funcs.edid_read: bridge-level EDID retrieval, delegated to
 * the shared AUX EDID reader.
 */
static const struct drm_edid *cdns_mhdp_bridge_edid_read(struct drm_bridge *bridge,
							 struct drm_connector *connector)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	return cdns_mhdp_edid_read(mhdp, connector);
}
/* Bridge operations table registered with the DRM bridge framework. */
static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
	.atomic_enable = cdns_mhdp_atomic_enable,
	.atomic_disable = cdns_mhdp_atomic_disable,
	.atomic_check = cdns_mhdp_atomic_check,
	.attach = cdns_mhdp_attach,
	.detach = cdns_mhdp_detach,
	.atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
	.atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
	.atomic_reset = cdns_mhdp_bridge_atomic_reset,
	.atomic_get_input_bus_fmts = cdns_mhdp_get_input_bus_fmts,
	.detect = cdns_mhdp_bridge_detect,
	.edid_read = cdns_mhdp_bridge_edid_read,
	.hpd_enable = cdns_mhdp_bridge_hpd_enable,
	.hpd_disable = cdns_mhdp_bridge_hpd_disable,
};
/*
 * Query firmware for HPD state. Returns true when a sink is connected;
 * *hpd_pulse is set when the event included an HPD pulse (IRQ_HPD).
 * Both firmware queries failing is reported as "disconnected".
 */
static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
{
	int hpd_event, hpd_status;

	*hpd_pulse = false;

	hpd_event = cdns_mhdp_read_hpd_event(mhdp);

	/* Getting event bits failed, bail out */
	if (hpd_event < 0) {
		dev_warn(mhdp->dev, "%s: read event failed: %d\n",
			 __func__, hpd_event);
		return false;
	}

	hpd_status = cdns_mhdp_get_hpd_status(mhdp);
	if (hpd_status < 0) {
		dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
			 __func__, hpd_status);
		return false;
	}

	if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
		*hpd_pulse = true;

	return !!hpd_status;
}
/*
 * HPD worker core: re-evaluate plug state and, if needed, re-train the
 * link and re-enable the previously active stream. Returns 0 on success
 * or a negative error code (the caller schedules a modeset retry then).
 */
static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
{
	struct cdns_mhdp_bridge_state *cdns_bridge_state;
	struct drm_display_mode *current_mode;
	bool old_plugged = mhdp->plugged;
	struct drm_bridge_state *state;
	u8 status[DP_LINK_STATUS_SIZE];
	bool hpd_pulse;
	int ret = 0;

	mutex_lock(&mhdp->link_mutex);

	mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);

	if (!mhdp->plugged) {
		/* Unplugged: drop the link and reset to host defaults */
		cdns_mhdp_link_down(mhdp);
		mhdp->link.rate = mhdp->host.link_rate;
		mhdp->link.num_lanes = mhdp->host.lanes_cnt;
		goto out;
	}

	/*
	 * If we get a HPD pulse event and we were and still are connected,
	 * check the link status. If link status is ok, there's nothing to do
	 * as we don't handle DP interrupts. If link status is bad, continue
	 * with full link setup.
	 */
	if (hpd_pulse && old_plugged == mhdp->plugged) {
		ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);

		/*
		 * If everything looks fine, just return, as we don't handle
		 * DP IRQs.
		 */
		if (ret > 0 &&
		    drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
		    drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
			goto out;

		/* If link is bad, mark link as down so that we do a new LT */
		mhdp->link_up = false;
	}

	if (!mhdp->link_up) {
		ret = cdns_mhdp_link_up(mhdp);
		if (ret < 0)
			goto out;
	}

	if (mhdp->bridge_enabled) {
		state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
		if (!state) {
			ret = -EINVAL;
			goto out;
		}

		cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
		if (!cdns_bridge_state) {
			ret = -EINVAL;
			goto out;
		}

		current_mode = cdns_bridge_state->current_mode;
		if (!current_mode) {
			ret = -EINVAL;
			goto out;
		}

		if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode,
					    mhdp->link.num_lanes,
					    mhdp->link.rate)) {
			ret = -EINVAL;
			goto out;
		}

		dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
			current_mode->name);

		cdns_mhdp_sst_enable(mhdp, current_mode);
	}
out:
	mutex_unlock(&mhdp->link_mutex);
	return ret;
}
/*
 * Deferred work run after a link-training failure: flag the connector's
 * link status as BAD and emit a hotplug uevent so userspace performs a
 * fresh modeset at (possibly) reduced link parameters.
 */
static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
{
	struct cdns_mhdp_device *mhdp;
	struct drm_connector *conn;

	mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);

	conn = &mhdp->connector;

	/* Grab the locks before changing connector property */
	mutex_lock(&conn->dev->mode_config.mutex);

	/*
	 * Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&conn->dev->mode_config.mutex);

	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(mhdp->bridge.dev);
}
/*
 * Threaded IRQ handler: dispatch firmware SW events. HPD events are
 * deferred to hpd_work (only when the bridge is attached); all other
 * event bits are accumulated for cdns_mhdp_wait_for_sw_event() waiters.
 */
static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
{
	struct cdns_mhdp_device *mhdp = data;
	u32 apb_stat, sw_ev0;
	bool bridge_attached;

	apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
	if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
		return IRQ_NONE;

	sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);

	/*
	 *  Calling drm_kms_helper_hotplug_event() when not attached
	 *  to drm device causes an oops because the drm_bridge->dev
	 *  is NULL. See cdns_mhdp_fw_cb() comments for details about the
	 *  problems related drm_kms_helper_hotplug_event() call.
	 */
	spin_lock(&mhdp->start_lock);
	bridge_attached = mhdp->bridge_attached;
	spin_unlock(&mhdp->start_lock);

	if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
		schedule_work(&mhdp->hpd_work);
	}

	if (sw_ev0 & ~CDNS_DPTX_HPD) {
		mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
		wake_up(&mhdp->sw_events_wq);
	}

	return IRQ_HANDLED;
}
/*
 * Wait (up to 500 ms) for the firmware to signal any bit in @event via
 * the SW event IRQ path. Returns the accumulated event bits (and clears
 * the requested ones), or 0 on timeout.
 */
u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event)
{
	u32 ret;

	ret = wait_event_timeout(mhdp->sw_events_wq,
				 mhdp->sw_events & event,
				 msecs_to_jiffies(500));
	if (!ret) {
		dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event);
		goto sw_event_out;
	}

	ret = mhdp->sw_events;
	mhdp->sw_events &= ~event;

sw_event_out:
	return ret;
}
/*
 * HPD worker: refresh link status; on failure with a registered
 * connector, schedule a modeset retry, otherwise notify DRM of the
 * hotplug (via uevent or bridge HPD notification when the DRM core
 * owns the connector).
 */
static void cdns_mhdp_hpd_work(struct work_struct *work)
{
	struct cdns_mhdp_device *mhdp = container_of(work,
						     struct cdns_mhdp_device,
						     hpd_work);
	int ret;

	ret = cdns_mhdp_update_link_status(mhdp);
	if (mhdp->connector.dev) {
		if (ret < 0)
			schedule_work(&mhdp->modeset_retry_work);
		else
			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
	} else {
		drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
	}
}
/*
 * Platform probe: map registers, acquire clock/PHY, run platform init,
 * program the functional clock dividers, install the IRQ handler, fill
 * host capabilities, start asynchronous firmware load and register the
 * DRM bridge. Error paths unwind in strict reverse order via gotos.
 */
static int cdns_mhdp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cdns_mhdp_device *mhdp;
	unsigned long rate;
	struct clk *clk;
	int ret;
	int irq;

	mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
	if (!mhdp)
		return -ENOMEM;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
		return PTR_ERR(clk);
	}

	mhdp->clk = clk;
	mhdp->dev = dev;
	mutex_init(&mhdp->mbox_mutex);
	mutex_init(&mhdp->link_mutex);
	spin_lock_init(&mhdp->start_lock);

	drm_dp_aux_init(&mhdp->aux);
	mhdp->aux.dev = dev;
	mhdp->aux.transfer = cdns_mhdp_transfer;

	mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mhdp->regs)) {
		dev_err(dev, "Failed to get memory resource\n");
		return PTR_ERR(mhdp->regs);
	}

	/* SAPB region is optional; without it HDCP is simply unavailable */
	mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb");
	if (IS_ERR(mhdp->sapb_regs)) {
		mhdp->hdcp_supported = false;
		dev_warn(dev,
			 "Failed to get SAPB memory resource, HDCP not supported\n");
	} else {
		mhdp->hdcp_supported = true;
	}

	mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
	if (IS_ERR(mhdp->phy)) {
		dev_err(dev, "no PHY configured\n");
		return PTR_ERR(mhdp->phy);
	}

	platform_set_drvdata(pdev, mhdp);

	mhdp->info = of_device_get_match_data(dev);

	clk_prepare_enable(clk);

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_resume_and_get failed\n");
		pm_runtime_disable(dev);
		goto clk_disable;
	}

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
		ret = mhdp->info->ops->init(mhdp);
		if (ret != 0) {
			dev_err(dev, "MHDP platform initialization failed: %d\n",
				ret);
			goto runtime_put;
		}
	}

	/* Tell the firmware the functional clock rate (MHz + remainder Hz) */
	rate = clk_get_rate(clk);
	writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
	writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);

	dev_dbg(dev, "func clk rate %lu Hz\n", rate);

	/* Mask all interrupts until the bridge is attached */
	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
					cdns_mhdp_irq_handler, IRQF_ONESHOT,
					"mhdp8546", mhdp);
	if (ret) {
		dev_err(dev, "cannot install IRQ %d\n", irq);
		ret = -EIO;
		goto plat_fini;
	}

	cdns_mhdp_fill_host_caps(mhdp);

	/* Initialize link rate and num of lanes to host values */
	mhdp->link.rate = mhdp->host.link_rate;
	mhdp->link.num_lanes = mhdp->host.lanes_cnt;

	/* The only currently supported format */
	mhdp->display_fmt.y_only = false;
	mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
	mhdp->display_fmt.bpc = 8;

	mhdp->bridge.of_node = pdev->dev.of_node;
	mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
	mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
			   DRM_BRIDGE_OP_HPD;
	mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;

	ret = phy_init(mhdp->phy);
	if (ret) {
		dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
		goto plat_fini;
	}

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
	INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work);

	init_waitqueue_head(&mhdp->fw_load_wq);
	init_waitqueue_head(&mhdp->sw_events_wq);

	ret = cdns_mhdp_load_firmware(mhdp);
	if (ret)
		goto phy_exit;

	if (mhdp->hdcp_supported)
		cdns_mhdp_hdcp_init(mhdp);

	drm_bridge_add(&mhdp->bridge);

	return 0;

phy_exit:
	phy_exit(mhdp->phy);
plat_fini:
	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
		mhdp->info->ops->exit(mhdp);
runtime_put:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
clk_disable:
	clk_disable_unprepare(mhdp->clk);

	return ret;
}
/*
 * Platform remove: unregister the bridge, wait (bounded) for the async
 * firmware load to settle, stop the firmware, tear down PHY/platform
 * state, drop runtime PM, flush workers and release the clock.
 */
static void cdns_mhdp_remove(struct platform_device *pdev)
{
	struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
	unsigned long timeout = msecs_to_jiffies(100);
	int ret;

	drm_bridge_remove(&mhdp->bridge);

	/* Don't race the fw loader: wait for it, then mark HW stopped */
	ret = wait_event_timeout(mhdp->fw_load_wq,
				 mhdp->hw_state == MHDP_HW_READY,
				 timeout);
	spin_lock(&mhdp->start_lock);
	mhdp->hw_state = MHDP_HW_STOPPED;
	spin_unlock(&mhdp->start_lock);

	if (ret == 0)
		dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
			__func__);

	ret = cdns_mhdp_set_firmware_active(mhdp, false);
	if (ret)
		dev_err(mhdp->dev, "Failed to stop firmware (%pe)\n",
			ERR_PTR(ret));

	phy_exit(mhdp->phy);

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
		mhdp->info->ops->exit(mhdp);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	cancel_work_sync(&mhdp->modeset_retry_work);
	flush_work(&mhdp->hpd_work);
	/* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */

	clk_disable_unprepare(mhdp->clk);
}
/* Device-tree match table; J721E variant carries platform-specific ops. */
static const struct of_device_id mhdp_ids[] = {
	{ .compatible = "cdns,mhdp8546", },
#ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
	{ .compatible = "ti,j721e-mhdp8546",
	  .data = &(const struct cdns_mhdp_platform_info) {
		  .input_bus_flags = &mhdp_ti_j721e_bridge_input_bus_flags,
		  .ops = &mhdp_ti_j721e_ops,
	  },
	},
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mhdp_ids);
/* Platform driver registration for the MHDP8546 DP bridge. */
static struct platform_driver mhdp_driver = {
	.driver	= {
		.name		= "cdns-mhdp8546",
		.of_match_table	= mhdp_ids,
	},
	.probe	= cdns_mhdp_probe,
	.remove	= cdns_mhdp_remove,
};
module_platform_driver(mhdp_driver);
2663 MODULE_FIRMWARE(FW_NAME
);
2665 MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
2666 MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
2667 MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
2668 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
2669 MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
2670 MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
2671 MODULE_LICENSE("GPL");
2672 MODULE_ALIAS("platform:cdns-mhdp8546");