1 // SPDX-License-Identifier: GPL-2.0
3 * Cadence MHDP8546 DP bridge driver.
5 * Copyright (C) 2020 Cadence Design Systems, Inc.
7 * Authors: Quentin Schulz <quentin.schulz@free-electrons.com>
8 * Swapnil Jakhade <sjakhade@cadence.com>
9 * Yuti Amonkar <yamonkar@cadence.com>
10 * Tomi Valkeinen <tomi.valkeinen@ti.com>
11 * Jyri Sarha <jsarha@ti.com>
14 * - Implement optimized mailbox communication using mailbox interrupts
15 * - Add support for power management
16 * - Add support for features like audio, MST and fast link training
17 * - Implement request_fw_cancel to handle HW_STATE
18 * - Fix asynchronous loading of firmware implementation
19 * - Add DRM helper function for cdns_mhdp_lower_link_rate
22 #include <linux/clk.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/firmware.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/module.h>
31 #include <linux/of_device.h>
32 #include <linux/phy/phy.h>
33 #include <linux/phy/phy-dp.h>
34 #include <linux/platform_device.h>
35 #include <linux/slab.h>
36 #include <linux/wait.h>
38 #include <drm/drm_atomic.h>
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_atomic_state_helper.h>
41 #include <drm/drm_bridge.h>
42 #include <drm/drm_connector.h>
43 #include <drm/drm_crtc_helper.h>
44 #include <drm/drm_dp_helper.h>
45 #include <drm/drm_modeset_helper_vtables.h>
46 #include <drm/drm_print.h>
47 #include <drm/drm_probe_helper.h>
49 #include <asm/unaligned.h>
51 #include "cdns-mhdp8546-core.h"
53 #include "cdns-mhdp8546-j721e.h"
55 static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device
*mhdp
)
59 WARN_ON(!mutex_is_locked(&mhdp
->mbox_mutex
));
61 ret
= readx_poll_timeout(readl
, mhdp
->regs
+ CDNS_MAILBOX_EMPTY
,
62 empty
, !empty
, MAILBOX_RETRY_US
,
67 return readl(mhdp
->regs
+ CDNS_MAILBOX_RX_DATA
) & 0xff;
70 static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device
*mhdp
, u8 val
)
74 WARN_ON(!mutex_is_locked(&mhdp
->mbox_mutex
));
76 ret
= readx_poll_timeout(readl
, mhdp
->regs
+ CDNS_MAILBOX_FULL
,
77 full
, !full
, MAILBOX_RETRY_US
,
82 writel(val
, mhdp
->regs
+ CDNS_MAILBOX_TX_DATA
);
87 static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device
*mhdp
,
88 u8 module_id
, u8 opcode
,
95 /* read the header of the message */
96 for (i
= 0; i
< sizeof(header
); i
++) {
97 ret
= cdns_mhdp_mailbox_read(mhdp
);
104 mbox_size
= get_unaligned_be16(header
+ 2);
106 if (opcode
!= header
[0] || module_id
!= header
[1] ||
107 req_size
!= mbox_size
) {
109 * If the message in mailbox is not what we want, we need to
110 * clear the mailbox by reading its contents.
112 for (i
= 0; i
< mbox_size
; i
++)
113 if (cdns_mhdp_mailbox_read(mhdp
) < 0)
122 static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device
*mhdp
,
123 u8
*buff
, u16 buff_size
)
128 for (i
= 0; i
< buff_size
; i
++) {
129 ret
= cdns_mhdp_mailbox_read(mhdp
);
139 static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device
*mhdp
, u8 module_id
,
140 u8 opcode
, u16 size
, u8
*message
)
146 header
[1] = module_id
;
147 put_unaligned_be16(size
, header
+ 2);
149 for (i
= 0; i
< sizeof(header
); i
++) {
150 ret
= cdns_mhdp_mailbox_write(mhdp
, header
[i
]);
155 for (i
= 0; i
< size
; i
++) {
156 ret
= cdns_mhdp_mailbox_write(mhdp
, message
[i
]);
165 int cdns_mhdp_reg_read(struct cdns_mhdp_device
*mhdp
, u32 addr
, u32
*value
)
170 put_unaligned_be32(addr
, msg
);
172 mutex_lock(&mhdp
->mbox_mutex
);
174 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_GENERAL
,
175 GENERAL_REGISTER_READ
,
180 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_GENERAL
,
181 GENERAL_REGISTER_READ
,
186 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, resp
, sizeof(resp
));
190 /* Returned address value should be the same as requested */
191 if (memcmp(msg
, resp
, sizeof(msg
))) {
196 *value
= get_unaligned_be32(resp
+ 4);
199 mutex_unlock(&mhdp
->mbox_mutex
);
201 dev_err(mhdp
->dev
, "Failed to read register\n");
209 int cdns_mhdp_reg_write(struct cdns_mhdp_device
*mhdp
, u16 addr
, u32 val
)
214 put_unaligned_be16(addr
, msg
);
215 put_unaligned_be32(val
, msg
+ 2);
217 mutex_lock(&mhdp
->mbox_mutex
);
219 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
220 DPTX_WRITE_REGISTER
, sizeof(msg
), msg
);
222 mutex_unlock(&mhdp
->mbox_mutex
);
228 int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device
*mhdp
, u16 addr
,
229 u8 start_bit
, u8 bits_no
, u32 val
)
234 put_unaligned_be16(addr
, field
);
235 field
[2] = start_bit
;
237 put_unaligned_be32(val
, field
+ 4);
239 mutex_lock(&mhdp
->mbox_mutex
);
241 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
242 DPTX_WRITE_FIELD
, sizeof(field
), field
);
244 mutex_unlock(&mhdp
->mbox_mutex
);
250 int cdns_mhdp_dpcd_read(struct cdns_mhdp_device
*mhdp
,
251 u32 addr
, u8
*data
, u16 len
)
256 put_unaligned_be16(len
, msg
);
257 put_unaligned_be24(addr
, msg
+ 2);
259 mutex_lock(&mhdp
->mbox_mutex
);
261 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
262 DPTX_READ_DPCD
, sizeof(msg
), msg
);
266 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
272 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, reg
, sizeof(reg
));
276 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, data
, len
);
279 mutex_unlock(&mhdp
->mbox_mutex
);
285 int cdns_mhdp_dpcd_write(struct cdns_mhdp_device
*mhdp
, u32 addr
, u8 value
)
290 put_unaligned_be16(1, msg
);
291 put_unaligned_be24(addr
, msg
+ 2);
294 mutex_lock(&mhdp
->mbox_mutex
);
296 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
297 DPTX_WRITE_DPCD
, sizeof(msg
), msg
);
301 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
302 DPTX_WRITE_DPCD
, sizeof(reg
));
306 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, reg
, sizeof(reg
));
310 if (addr
!= get_unaligned_be24(reg
+ 2))
314 mutex_unlock(&mhdp
->mbox_mutex
);
317 dev_err(mhdp
->dev
, "dpcd write failed: %d\n", ret
);
322 int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device
*mhdp
, bool enable
)
327 msg
[0] = GENERAL_MAIN_CONTROL
;
328 msg
[1] = MB_MODULE_ID_GENERAL
;
331 msg
[4] = enable
? FW_ACTIVE
: FW_STANDBY
;
333 mutex_lock(&mhdp
->mbox_mutex
);
335 for (i
= 0; i
< sizeof(msg
); i
++) {
336 ret
= cdns_mhdp_mailbox_write(mhdp
, msg
[i
]);
341 /* read the firmware state */
342 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, msg
, sizeof(msg
));
349 mutex_unlock(&mhdp
->mbox_mutex
);
352 dev_err(mhdp
->dev
, "set firmware active failed\n");
357 int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device
*mhdp
)
362 mutex_lock(&mhdp
->mbox_mutex
);
364 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
365 DPTX_HPD_STATE
, 0, NULL
);
369 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
375 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, &status
, sizeof(status
));
379 mutex_unlock(&mhdp
->mbox_mutex
);
381 dev_dbg(mhdp
->dev
, "%s: HPD %splugged\n", __func__
,
387 mutex_unlock(&mhdp
->mbox_mutex
);
393 int cdns_mhdp_get_edid_block(void *data
, u8
*edid
,
394 unsigned int block
, size_t length
)
396 struct cdns_mhdp_device
*mhdp
= data
;
397 u8 msg
[2], reg
[2], i
;
400 mutex_lock(&mhdp
->mbox_mutex
);
402 for (i
= 0; i
< 4; i
++) {
406 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
407 DPTX_GET_EDID
, sizeof(msg
), msg
);
411 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
413 sizeof(reg
) + length
);
417 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, reg
, sizeof(reg
));
421 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, edid
, length
);
425 if (reg
[0] == length
&& reg
[1] == block
/ 2)
429 mutex_unlock(&mhdp
->mbox_mutex
);
432 dev_err(mhdp
->dev
, "get block[%d] edid failed: %d\n",
439 int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device
*mhdp
)
444 mutex_lock(&mhdp
->mbox_mutex
);
446 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
447 DPTX_READ_EVENT
, 0, NULL
);
451 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
452 DPTX_READ_EVENT
, sizeof(event
));
456 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, &event
, sizeof(event
));
458 mutex_unlock(&mhdp
->mbox_mutex
);
463 dev_dbg(mhdp
->dev
, "%s: %s%s%s%s\n", __func__
,
464 (event
& DPTX_READ_EVENT_HPD_TO_HIGH
) ? "TO_HIGH " : "",
465 (event
& DPTX_READ_EVENT_HPD_TO_LOW
) ? "TO_LOW " : "",
466 (event
& DPTX_READ_EVENT_HPD_PULSE
) ? "PULSE " : "",
467 (event
& DPTX_READ_EVENT_HPD_STATE
) ? "HPD_STATE " : "");
473 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device
*mhdp
, unsigned int nlanes
,
474 unsigned int udelay
, const u8
*lanes_data
,
475 u8 link_status
[DP_LINK_STATUS_SIZE
])
478 u8 hdr
[5]; /* For DPCD read response header */
482 if (nlanes
!= 4 && nlanes
!= 2 && nlanes
!= 1) {
483 dev_err(mhdp
->dev
, "invalid number of lanes: %u\n", nlanes
);
489 put_unaligned_be16(udelay
, payload
+ 1);
490 memcpy(payload
+ 3, lanes_data
, nlanes
);
492 mutex_lock(&mhdp
->mbox_mutex
);
494 ret
= cdns_mhdp_mailbox_send(mhdp
, MB_MODULE_ID_DP_TX
,
496 sizeof(payload
), payload
);
500 /* Yes, read the DPCD read command response */
501 ret
= cdns_mhdp_mailbox_recv_header(mhdp
, MB_MODULE_ID_DP_TX
,
503 sizeof(hdr
) + DP_LINK_STATUS_SIZE
);
507 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, hdr
, sizeof(hdr
));
511 addr
= get_unaligned_be24(hdr
+ 2);
512 if (addr
!= DP_LANE0_1_STATUS
)
515 ret
= cdns_mhdp_mailbox_recv_data(mhdp
, link_status
,
516 DP_LINK_STATUS_SIZE
);
519 mutex_unlock(&mhdp
->mbox_mutex
);
522 dev_err(mhdp
->dev
, "Failed to adjust Link Training.\n");
528 * cdns_mhdp_link_power_up() - power up a DisplayPort link
529 * @aux: DisplayPort AUX channel
530 * @link: pointer to a structure containing the link configuration
532 * Returns 0 on success or a negative error code on failure.
535 int cdns_mhdp_link_power_up(struct drm_dp_aux
*aux
, struct cdns_mhdp_link
*link
)
540 /* DP_SET_POWER register is only available on DPCD v1.1 and later */
541 if (link
->revision
< 0x11)
544 err
= drm_dp_dpcd_readb(aux
, DP_SET_POWER
, &value
);
548 value
&= ~DP_SET_POWER_MASK
;
549 value
|= DP_SET_POWER_D0
;
551 err
= drm_dp_dpcd_writeb(aux
, DP_SET_POWER
, value
);
556 * According to the DP 1.1 specification, a "Sink Device must exit the
557 * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
558 * Control Field" (register 0x600).
560 usleep_range(1000, 2000);
566 * cdns_mhdp_link_power_down() - power down a DisplayPort link
567 * @aux: DisplayPort AUX channel
568 * @link: pointer to a structure containing the link configuration
570 * Returns 0 on success or a negative error code on failure.
573 int cdns_mhdp_link_power_down(struct drm_dp_aux
*aux
,
574 struct cdns_mhdp_link
*link
)
579 /* DP_SET_POWER register is only available on DPCD v1.1 and later */
580 if (link
->revision
< 0x11)
583 err
= drm_dp_dpcd_readb(aux
, DP_SET_POWER
, &value
);
587 value
&= ~DP_SET_POWER_MASK
;
588 value
|= DP_SET_POWER_D3
;
590 err
= drm_dp_dpcd_writeb(aux
, DP_SET_POWER
, value
);
598 * cdns_mhdp_link_configure() - configure a DisplayPort link
599 * @aux: DisplayPort AUX channel
600 * @link: pointer to a structure containing the link configuration
602 * Returns 0 on success or a negative error code on failure.
605 int cdns_mhdp_link_configure(struct drm_dp_aux
*aux
,
606 struct cdns_mhdp_link
*link
)
611 values
[0] = drm_dp_link_rate_to_bw_code(link
->rate
);
612 values
[1] = link
->num_lanes
;
614 if (link
->capabilities
& DP_LINK_CAP_ENHANCED_FRAMING
)
615 values
[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN
;
617 err
= drm_dp_dpcd_write(aux
, DP_LINK_BW_SET
, values
, sizeof(values
));
624 static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device
*mhdp
)
626 return min(mhdp
->host
.link_rate
, mhdp
->sink
.link_rate
);
629 static u8
cdns_mhdp_max_num_lanes(struct cdns_mhdp_device
*mhdp
)
631 return min(mhdp
->sink
.lanes_cnt
, mhdp
->host
.lanes_cnt
);
634 static u8
cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device
*mhdp
)
636 return fls(mhdp
->host
.pattern_supp
& mhdp
->sink
.pattern_supp
);
639 static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device
*mhdp
)
641 /* Check if SSC is supported by both sides */
642 return mhdp
->host
.ssc
&& mhdp
->sink
.ssc
;
645 static enum drm_connector_status
cdns_mhdp_detect(struct cdns_mhdp_device
*mhdp
)
647 dev_dbg(mhdp
->dev
, "%s: %d\n", __func__
, mhdp
->plugged
);
650 return connector_status_connected
;
652 return connector_status_disconnected
;
655 static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device
*mhdp
)
657 u32 major_num
, minor_num
, revision
;
660 fw_ver
= (readl(mhdp
->regs
+ CDNS_VER_H
) << 8)
661 | readl(mhdp
->regs
+ CDNS_VER_L
);
663 lib_ver
= (readl(mhdp
->regs
+ CDNS_LIB_H_ADDR
) << 8)
664 | readl(mhdp
->regs
+ CDNS_LIB_L_ADDR
);
666 if (lib_ver
< 33984) {
668 * Older FW versions with major number 1, used to store FW
669 * version information by storing repository revision number
670 * in registers. This is for identifying these FW versions.
674 if (fw_ver
== 26098) {
676 } else if (lib_ver
== 0 && fw_ver
== 0) {
679 dev_err(mhdp
->dev
, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
684 /* To identify newer FW versions with major number 2 onwards. */
685 major_num
= fw_ver
/ 10000;
686 minor_num
= (fw_ver
/ 100) % 100;
687 revision
= (fw_ver
% 10000) % 100;
690 dev_dbg(mhdp
->dev
, "FW version: v%u.%u.%u\n", major_num
, minor_num
,
695 static int cdns_mhdp_fw_activate(const struct firmware
*fw
,
696 struct cdns_mhdp_device
*mhdp
)
701 /* Release uCPU reset and stall it. */
702 writel(CDNS_CPU_STALL
, mhdp
->regs
+ CDNS_APB_CTRL
);
704 memcpy_toio(mhdp
->regs
+ CDNS_MHDP_IMEM
, fw
->data
, fw
->size
);
706 /* Leave debug mode, release stall */
707 writel(0, mhdp
->regs
+ CDNS_APB_CTRL
);
710 * Wait for the KEEP_ALIVE "message" on the first 8 bits.
711 * Updated each sched "tick" (~2ms)
713 ret
= readl_poll_timeout(mhdp
->regs
+ CDNS_KEEP_ALIVE
, reg
,
714 reg
& CDNS_KEEP_ALIVE_MASK
, 500,
715 CDNS_KEEP_ALIVE_TIMEOUT
);
718 "device didn't give any life sign: reg %d\n", reg
);
722 ret
= cdns_mhdp_check_fw_version(mhdp
);
726 /* Init events to 0 as it's not cleared by FW at boot but on read */
727 readl(mhdp
->regs
+ CDNS_SW_EVENT0
);
728 readl(mhdp
->regs
+ CDNS_SW_EVENT1
);
729 readl(mhdp
->regs
+ CDNS_SW_EVENT2
);
730 readl(mhdp
->regs
+ CDNS_SW_EVENT3
);
733 ret
= cdns_mhdp_set_firmware_active(mhdp
, true);
737 spin_lock(&mhdp
->start_lock
);
739 mhdp
->hw_state
= MHDP_HW_READY
;
742 * Here we must keep the lock while enabling the interrupts
743 * since it would otherwise be possible that interrupt enable
744 * code is executed after the bridge is detached. The similar
745 * situation is not possible in attach()/detach() callbacks
746 * since the hw_state changes from MHDP_HW_READY to
747 * MHDP_HW_STOPPED happens only due to driver removal when
748 * bridge should already be detached.
750 if (mhdp
->bridge_attached
)
751 writel(~(u32
)CDNS_APB_INT_MASK_SW_EVENT_INT
,
752 mhdp
->regs
+ CDNS_APB_INT_MASK
);
754 spin_unlock(&mhdp
->start_lock
);
756 wake_up(&mhdp
->fw_load_wq
);
757 dev_dbg(mhdp
->dev
, "DP FW activated\n");
762 static void cdns_mhdp_fw_cb(const struct firmware
*fw
, void *context
)
764 struct cdns_mhdp_device
*mhdp
= context
;
765 bool bridge_attached
;
768 dev_dbg(mhdp
->dev
, "firmware callback\n");
770 if (!fw
|| !fw
->data
) {
771 dev_err(mhdp
->dev
, "%s: No firmware.\n", __func__
);
775 ret
= cdns_mhdp_fw_activate(fw
, mhdp
);
777 release_firmware(fw
);
783 * XXX how to make sure the bridge is still attached when
784 * calling drm_kms_helper_hotplug_event() after releasing
785 * the lock? We should not hold the spin lock when
786 * calling drm_kms_helper_hotplug_event() since it may
787 * cause a dead lock. FB-dev console calls detect from the
788 * same thread just down the call stack started here.
790 spin_lock(&mhdp
->start_lock
);
791 bridge_attached
= mhdp
->bridge_attached
;
792 spin_unlock(&mhdp
->start_lock
);
793 if (bridge_attached
) {
794 if (mhdp
->connector
.dev
)
795 drm_kms_helper_hotplug_event(mhdp
->bridge
.dev
);
797 drm_bridge_hpd_notify(&mhdp
->bridge
, cdns_mhdp_detect(mhdp
));
801 static int cdns_mhdp_load_firmware(struct cdns_mhdp_device
*mhdp
)
805 ret
= request_firmware_nowait(THIS_MODULE
, true, FW_NAME
, mhdp
->dev
,
806 GFP_KERNEL
, mhdp
, cdns_mhdp_fw_cb
);
808 dev_err(mhdp
->dev
, "failed to load firmware (%s), ret: %d\n",
816 static ssize_t
cdns_mhdp_transfer(struct drm_dp_aux
*aux
,
817 struct drm_dp_aux_msg
*msg
)
819 struct cdns_mhdp_device
*mhdp
= dev_get_drvdata(aux
->dev
);
822 if (msg
->request
!= DP_AUX_NATIVE_WRITE
&&
823 msg
->request
!= DP_AUX_NATIVE_READ
)
826 if (msg
->request
== DP_AUX_NATIVE_WRITE
) {
827 const u8
*buf
= msg
->buffer
;
830 for (i
= 0; i
< msg
->size
; ++i
) {
831 ret
= cdns_mhdp_dpcd_write(mhdp
,
832 msg
->address
+ i
, buf
[i
]);
837 "Failed to write DPCD addr %u\n",
843 ret
= cdns_mhdp_dpcd_read(mhdp
, msg
->address
,
844 msg
->buffer
, msg
->size
);
847 "Failed to read DPCD addr %u\n",
857 static int cdns_mhdp_link_training_init(struct cdns_mhdp_device
*mhdp
)
859 union phy_configure_opts phy_cfg
;
863 drm_dp_dpcd_writeb(&mhdp
->aux
, DP_TRAINING_PATTERN_SET
,
864 DP_TRAINING_PATTERN_DISABLE
);
866 /* Reset PHY configuration */
867 reg32
= CDNS_PHY_COMMON_CONFIG
| CDNS_PHY_TRAINING_TYPE(1);
868 if (!mhdp
->host
.scrambler
)
869 reg32
|= CDNS_PHY_SCRAMBLER_BYPASS
;
871 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_PHY_CONFIG
, reg32
);
873 cdns_mhdp_reg_write(mhdp
, CDNS_DP_ENHNCD
,
874 mhdp
->sink
.enhanced
& mhdp
->host
.enhanced
);
876 cdns_mhdp_reg_write(mhdp
, CDNS_DP_LANE_EN
,
877 CDNS_DP_LANE_EN_LANES(mhdp
->link
.num_lanes
));
879 cdns_mhdp_link_configure(&mhdp
->aux
, &mhdp
->link
);
880 phy_cfg
.dp
.link_rate
= mhdp
->link
.rate
/ 100;
881 phy_cfg
.dp
.lanes
= mhdp
->link
.num_lanes
;
883 memset(phy_cfg
.dp
.voltage
, 0, sizeof(phy_cfg
.dp
.voltage
));
884 memset(phy_cfg
.dp
.pre
, 0, sizeof(phy_cfg
.dp
.pre
));
886 phy_cfg
.dp
.ssc
= cdns_mhdp_get_ssc_supported(mhdp
);
887 phy_cfg
.dp
.set_lanes
= true;
888 phy_cfg
.dp
.set_rate
= true;
889 phy_cfg
.dp
.set_voltages
= true;
890 ret
= phy_configure(mhdp
->phy
, &phy_cfg
);
892 dev_err(mhdp
->dev
, "%s: phy_configure() failed: %d\n",
897 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_PHY_CONFIG
,
898 CDNS_PHY_COMMON_CONFIG
|
899 CDNS_PHY_TRAINING_EN
|
900 CDNS_PHY_TRAINING_TYPE(1) |
901 CDNS_PHY_SCRAMBLER_BYPASS
);
903 drm_dp_dpcd_writeb(&mhdp
->aux
, DP_TRAINING_PATTERN_SET
,
904 DP_TRAINING_PATTERN_1
| DP_LINK_SCRAMBLING_DISABLE
);
909 static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device
*mhdp
,
910 u8 link_status
[DP_LINK_STATUS_SIZE
],
911 u8 lanes_data
[CDNS_DP_MAX_NUM_LANES
],
912 union phy_configure_opts
*phy_cfg
)
914 u8 adjust
, max_pre_emph
, max_volt_swing
;
915 u8 set_volt
, set_pre
;
918 max_pre_emph
= CDNS_PRE_EMPHASIS(mhdp
->host
.pre_emphasis
)
919 << DP_TRAIN_PRE_EMPHASIS_SHIFT
;
920 max_volt_swing
= CDNS_VOLT_SWING(mhdp
->host
.volt_swing
);
922 for (i
= 0; i
< mhdp
->link
.num_lanes
; i
++) {
923 /* Check if Voltage swing and pre-emphasis are within limits */
924 adjust
= drm_dp_get_adjust_request_voltage(link_status
, i
);
925 set_volt
= min(adjust
, max_volt_swing
);
927 adjust
= drm_dp_get_adjust_request_pre_emphasis(link_status
, i
);
928 set_pre
= min(adjust
, max_pre_emph
)
929 >> DP_TRAIN_PRE_EMPHASIS_SHIFT
;
932 * Voltage swing level and pre-emphasis level combination is
933 * not allowed: leaving pre-emphasis as-is, and adjusting
936 if (set_volt
+ set_pre
> 3)
937 set_volt
= 3 - set_pre
;
939 phy_cfg
->dp
.voltage
[i
] = set_volt
;
940 lanes_data
[i
] = set_volt
;
942 if (set_volt
== max_volt_swing
)
943 lanes_data
[i
] |= DP_TRAIN_MAX_SWING_REACHED
;
945 phy_cfg
->dp
.pre
[i
] = set_pre
;
946 lanes_data
[i
] |= (set_pre
<< DP_TRAIN_PRE_EMPHASIS_SHIFT
);
948 if (set_pre
== (max_pre_emph
>> DP_TRAIN_PRE_EMPHASIS_SHIFT
))
949 lanes_data
[i
] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED
;
954 void cdns_mhdp_set_adjust_request_voltage(u8 link_status
[DP_LINK_STATUS_SIZE
],
955 unsigned int lane
, u8 volt
)
957 unsigned int s
= ((lane
& 1) ?
958 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT
:
959 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT
);
960 unsigned int idx
= DP_ADJUST_REQUEST_LANE0_1
- DP_LANE0_1_STATUS
+ (lane
>> 1);
962 link_status
[idx
] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK
<< s
);
963 link_status
[idx
] |= volt
<< s
;
967 void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status
[DP_LINK_STATUS_SIZE
],
968 unsigned int lane
, u8 pre_emphasis
)
970 unsigned int s
= ((lane
& 1) ?
971 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT
:
972 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT
);
973 unsigned int idx
= DP_ADJUST_REQUEST_LANE0_1
- DP_LANE0_1_STATUS
+ (lane
>> 1);
975 link_status
[idx
] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK
<< s
);
976 link_status
[idx
] |= pre_emphasis
<< s
;
979 static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device
*mhdp
,
980 u8 link_status
[DP_LINK_STATUS_SIZE
])
982 u8 max_pre
= CDNS_PRE_EMPHASIS(mhdp
->host
.pre_emphasis
);
983 u8 max_volt
= CDNS_VOLT_SWING(mhdp
->host
.volt_swing
);
987 for (i
= 0; i
< mhdp
->link
.num_lanes
; i
++) {
988 volt
= drm_dp_get_adjust_request_voltage(link_status
, i
);
989 pre
= drm_dp_get_adjust_request_pre_emphasis(link_status
, i
);
991 cdns_mhdp_set_adjust_request_voltage(link_status
, i
,
993 if (mhdp
->host
.volt_swing
& CDNS_FORCE_VOLT_SWING
)
994 cdns_mhdp_set_adjust_request_voltage(link_status
, i
,
996 if (mhdp
->host
.pre_emphasis
& CDNS_FORCE_PRE_EMPHASIS
)
997 cdns_mhdp_set_adjust_request_pre_emphasis(link_status
,
1002 static void cdns_mhdp_print_lt_status(const char *prefix
,
1003 struct cdns_mhdp_device
*mhdp
,
1004 union phy_configure_opts
*phy_cfg
)
1006 char vs
[8] = "0/0/0/0";
1007 char pe
[8] = "0/0/0/0";
1010 for (i
= 0; i
< mhdp
->link
.num_lanes
; i
++) {
1011 vs
[i
* 2] = '0' + phy_cfg
->dp
.voltage
[i
];
1012 pe
[i
* 2] = '0' + phy_cfg
->dp
.pre
[i
];
1015 vs
[i
* 2 - 1] = '\0';
1016 pe
[i
* 2 - 1] = '\0';
1018 dev_dbg(mhdp
->dev
, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
1020 mhdp
->link
.num_lanes
, mhdp
->link
.rate
/ 100,
1024 static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device
*mhdp
,
1026 unsigned int training_interval
)
1028 u8 lanes_data
[CDNS_DP_MAX_NUM_LANES
], fail_counter_short
= 0;
1029 u8 link_status
[DP_LINK_STATUS_SIZE
];
1030 union phy_configure_opts phy_cfg
;
1035 dev_dbg(mhdp
->dev
, "Starting EQ phase\n");
1037 /* Enable link training TPS[eq_tps] in PHY */
1038 reg32
= CDNS_PHY_COMMON_CONFIG
| CDNS_PHY_TRAINING_EN
|
1039 CDNS_PHY_TRAINING_TYPE(eq_tps
);
1041 reg32
|= CDNS_PHY_SCRAMBLER_BYPASS
;
1042 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_PHY_CONFIG
, reg32
);
1044 drm_dp_dpcd_writeb(&mhdp
->aux
, DP_TRAINING_PATTERN_SET
,
1045 (eq_tps
!= 4) ? eq_tps
| DP_LINK_SCRAMBLING_DISABLE
:
1046 CDNS_DP_TRAINING_PATTERN_4
);
1048 drm_dp_dpcd_read_link_status(&mhdp
->aux
, link_status
);
1051 cdns_mhdp_get_adjust_train(mhdp
, link_status
, lanes_data
,
1053 phy_cfg
.dp
.lanes
= mhdp
->link
.num_lanes
;
1054 phy_cfg
.dp
.ssc
= cdns_mhdp_get_ssc_supported(mhdp
);
1055 phy_cfg
.dp
.set_lanes
= false;
1056 phy_cfg
.dp
.set_rate
= false;
1057 phy_cfg
.dp
.set_voltages
= true;
1058 ret
= phy_configure(mhdp
->phy
, &phy_cfg
);
1060 dev_err(mhdp
->dev
, "%s: phy_configure() failed: %d\n",
1065 cdns_mhdp_adjust_lt(mhdp
, mhdp
->link
.num_lanes
,
1066 training_interval
, lanes_data
, link_status
);
1068 r
= drm_dp_clock_recovery_ok(link_status
, mhdp
->link
.num_lanes
);
1072 if (drm_dp_channel_eq_ok(link_status
, mhdp
->link
.num_lanes
)) {
1073 cdns_mhdp_print_lt_status("EQ phase ok", mhdp
,
1078 fail_counter_short
++;
1080 cdns_mhdp_adjust_requested_eq(mhdp
, link_status
);
1081 } while (fail_counter_short
< 5);
1084 cdns_mhdp_print_lt_status("EQ phase failed", mhdp
, &phy_cfg
);
1089 static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device
*mhdp
,
1090 u8 link_status
[DP_LINK_STATUS_SIZE
],
1091 u8
*req_volt
, u8
*req_pre
)
1093 const u8 max_volt
= CDNS_VOLT_SWING(mhdp
->host
.volt_swing
);
1094 const u8 max_pre
= CDNS_PRE_EMPHASIS(mhdp
->host
.pre_emphasis
);
1097 for (i
= 0; i
< mhdp
->link
.num_lanes
; i
++) {
1100 val
= mhdp
->host
.volt_swing
& CDNS_FORCE_VOLT_SWING
?
1101 max_volt
: req_volt
[i
];
1102 cdns_mhdp_set_adjust_request_voltage(link_status
, i
, val
);
1104 val
= mhdp
->host
.pre_emphasis
& CDNS_FORCE_PRE_EMPHASIS
?
1105 max_pre
: req_pre
[i
];
1106 cdns_mhdp_set_adjust_request_pre_emphasis(link_status
, i
, val
);
1111 void cdns_mhdp_validate_cr(struct cdns_mhdp_device
*mhdp
, bool *cr_done
,
1112 bool *same_before_adjust
, bool *max_swing_reached
,
1113 u8 before_cr
[CDNS_DP_MAX_NUM_LANES
],
1114 u8 after_cr
[DP_LINK_STATUS_SIZE
], u8
*req_volt
,
1117 const u8 max_volt
= CDNS_VOLT_SWING(mhdp
->host
.volt_swing
);
1118 const u8 max_pre
= CDNS_PRE_EMPHASIS(mhdp
->host
.pre_emphasis
);
1119 bool same_pre
, same_volt
;
1123 *same_before_adjust
= false;
1124 *max_swing_reached
= false;
1125 *cr_done
= drm_dp_clock_recovery_ok(after_cr
, mhdp
->link
.num_lanes
);
1127 for (i
= 0; i
< mhdp
->link
.num_lanes
; i
++) {
1128 adjust
= drm_dp_get_adjust_request_voltage(after_cr
, i
);
1129 req_volt
[i
] = min(adjust
, max_volt
);
1131 adjust
= drm_dp_get_adjust_request_pre_emphasis(after_cr
, i
) >>
1132 DP_TRAIN_PRE_EMPHASIS_SHIFT
;
1133 req_pre
[i
] = min(adjust
, max_pre
);
1135 same_pre
= (before_cr
[i
] & DP_TRAIN_PRE_EMPHASIS_MASK
) ==
1136 req_pre
[i
] << DP_TRAIN_PRE_EMPHASIS_SHIFT
;
1137 same_volt
= (before_cr
[i
] & DP_TRAIN_VOLTAGE_SWING_MASK
) ==
1139 if (same_pre
&& same_volt
)
1140 *same_before_adjust
= true;
1142 /* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
1143 if (!*cr_done
&& req_volt
[i
] + req_pre
[i
] >= 3) {
1144 *max_swing_reached
= true;
1150 static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device
*mhdp
)
1152 u8 lanes_data
[CDNS_DP_MAX_NUM_LANES
],
1153 fail_counter_short
= 0, fail_counter_cr_long
= 0;
1154 u8 link_status
[DP_LINK_STATUS_SIZE
];
1156 union phy_configure_opts phy_cfg
;
1159 dev_dbg(mhdp
->dev
, "Starting CR phase\n");
1161 ret
= cdns_mhdp_link_training_init(mhdp
);
1165 drm_dp_dpcd_read_link_status(&mhdp
->aux
, link_status
);
1168 u8 requested_adjust_volt_swing
[CDNS_DP_MAX_NUM_LANES
] = {};
1169 u8 requested_adjust_pre_emphasis
[CDNS_DP_MAX_NUM_LANES
] = {};
1170 bool same_before_adjust
, max_swing_reached
;
1172 cdns_mhdp_get_adjust_train(mhdp
, link_status
, lanes_data
,
1174 phy_cfg
.dp
.lanes
= mhdp
->link
.num_lanes
;
1175 phy_cfg
.dp
.ssc
= cdns_mhdp_get_ssc_supported(mhdp
);
1176 phy_cfg
.dp
.set_lanes
= false;
1177 phy_cfg
.dp
.set_rate
= false;
1178 phy_cfg
.dp
.set_voltages
= true;
1179 ret
= phy_configure(mhdp
->phy
, &phy_cfg
);
1181 dev_err(mhdp
->dev
, "%s: phy_configure() failed: %d\n",
1186 cdns_mhdp_adjust_lt(mhdp
, mhdp
->link
.num_lanes
, 100,
1187 lanes_data
, link_status
);
1189 cdns_mhdp_validate_cr(mhdp
, &cr_done
, &same_before_adjust
,
1190 &max_swing_reached
, lanes_data
,
1192 requested_adjust_volt_swing
,
1193 requested_adjust_pre_emphasis
);
1195 if (max_swing_reached
) {
1196 dev_err(mhdp
->dev
, "CR: max swing reached\n");
1201 cdns_mhdp_print_lt_status("CR phase ok", mhdp
,
1206 /* Not all CR_DONE bits set */
1207 fail_counter_cr_long
++;
1209 if (same_before_adjust
) {
1210 fail_counter_short
++;
1214 fail_counter_short
= 0;
1216 * Voltage swing/pre-emphasis adjust requested
1219 cdns_mhdp_adjust_requested_cr(mhdp
, link_status
,
1220 requested_adjust_volt_swing
,
1221 requested_adjust_pre_emphasis
);
1222 } while (fail_counter_short
< 5 && fail_counter_cr_long
< 10);
1225 cdns_mhdp_print_lt_status("CR phase failed", mhdp
, &phy_cfg
);
1230 static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link
*link
)
1232 switch (drm_dp_link_rate_to_bw_code(link
->rate
)) {
1233 case DP_LINK_BW_2_7
:
1234 link
->rate
= drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62
);
1236 case DP_LINK_BW_5_4
:
1237 link
->rate
= drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7
);
1239 case DP_LINK_BW_8_1
:
1240 link
->rate
= drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4
);
1245 static int cdns_mhdp_link_training(struct cdns_mhdp_device
*mhdp
,
1246 unsigned int training_interval
)
1249 const u8 eq_tps
= cdns_mhdp_eq_training_pattern_supported(mhdp
);
1253 if (!cdns_mhdp_link_training_cr(mhdp
)) {
1254 if (drm_dp_link_rate_to_bw_code(mhdp
->link
.rate
) !=
1257 "Reducing link rate during CR phase\n");
1258 cdns_mhdp_lower_link_rate(&mhdp
->link
);
1261 } else if (mhdp
->link
.num_lanes
> 1) {
1263 "Reducing lanes number during CR phase\n");
1264 mhdp
->link
.num_lanes
>>= 1;
1265 mhdp
->link
.rate
= cdns_mhdp_max_link_rate(mhdp
);
1271 "Link training failed during CR phase\n");
1275 if (cdns_mhdp_link_training_channel_eq(mhdp
, eq_tps
,
1279 if (mhdp
->link
.num_lanes
> 1) {
1281 "Reducing lanes number during EQ phase\n");
1282 mhdp
->link
.num_lanes
>>= 1;
1285 } else if (drm_dp_link_rate_to_bw_code(mhdp
->link
.rate
) !=
1288 "Reducing link rate during EQ phase\n");
1289 cdns_mhdp_lower_link_rate(&mhdp
->link
);
1290 mhdp
->link
.num_lanes
= cdns_mhdp_max_num_lanes(mhdp
);
1295 dev_err(mhdp
->dev
, "Link training failed during EQ phase\n");
1299 dev_dbg(mhdp
->dev
, "Link training ok. Lanes: %u, Rate %u Mbps\n",
1300 mhdp
->link
.num_lanes
, mhdp
->link
.rate
/ 100);
1302 drm_dp_dpcd_writeb(&mhdp
->aux
, DP_TRAINING_PATTERN_SET
,
1303 mhdp
->host
.scrambler
? 0 :
1304 DP_LINK_SCRAMBLING_DISABLE
);
1306 ret
= cdns_mhdp_reg_read(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, ®32
);
1309 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1313 reg32
&= ~GENMASK(1, 0);
1314 reg32
|= CDNS_DP_NUM_LANES(mhdp
->link
.num_lanes
);
1315 reg32
|= CDNS_DP_WR_FAILING_EDGE_VSYNC
;
1316 reg32
|= CDNS_DP_FRAMER_EN
;
1317 cdns_mhdp_reg_write(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, reg32
);
1319 /* Reset PHY config */
1320 reg32
= CDNS_PHY_COMMON_CONFIG
| CDNS_PHY_TRAINING_TYPE(1);
1321 if (!mhdp
->host
.scrambler
)
1322 reg32
|= CDNS_PHY_SCRAMBLER_BYPASS
;
1323 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_PHY_CONFIG
, reg32
);
1327 /* Reset PHY config */
1328 reg32
= CDNS_PHY_COMMON_CONFIG
| CDNS_PHY_TRAINING_TYPE(1);
1329 if (!mhdp
->host
.scrambler
)
1330 reg32
|= CDNS_PHY_SCRAMBLER_BYPASS
;
1331 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_PHY_CONFIG
, reg32
);
1333 drm_dp_dpcd_writeb(&mhdp
->aux
, DP_TRAINING_PATTERN_SET
,
1334 DP_TRAINING_PATTERN_DISABLE
);
1339 static u32
cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device
*mhdp
,
1345 return 4000 << (interval
- 1);
1347 "wrong training interval returned by DPCD: %d\n", interval
);
1351 static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device
*mhdp
)
1353 unsigned int link_rate
;
1355 /* Get source capabilities based on PHY attributes */
1357 mhdp
->host
.lanes_cnt
= mhdp
->phy
->attrs
.bus_width
;
1358 if (!mhdp
->host
.lanes_cnt
)
1359 mhdp
->host
.lanes_cnt
= 4;
1361 link_rate
= mhdp
->phy
->attrs
.max_link_rate
;
1363 link_rate
= drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1
);
1365 /* PHY uses Mb/s, DRM uses tens of kb/s. */
1368 mhdp
->host
.link_rate
= link_rate
;
1369 mhdp
->host
.volt_swing
= CDNS_VOLT_SWING(3);
1370 mhdp
->host
.pre_emphasis
= CDNS_PRE_EMPHASIS(3);
1371 mhdp
->host
.pattern_supp
= CDNS_SUPPORT_TPS(1) |
1372 CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
1373 CDNS_SUPPORT_TPS(4);
1374 mhdp
->host
.lane_mapping
= CDNS_LANE_MAPPING_NORMAL
;
1375 mhdp
->host
.fast_link
= false;
1376 mhdp
->host
.enhanced
= true;
1377 mhdp
->host
.scrambler
= true;
1378 mhdp
->host
.ssc
= false;
1381 static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device
*mhdp
,
1382 u8 dpcd
[DP_RECEIVER_CAP_SIZE
])
1384 mhdp
->sink
.link_rate
= mhdp
->link
.rate
;
1385 mhdp
->sink
.lanes_cnt
= mhdp
->link
.num_lanes
;
1386 mhdp
->sink
.enhanced
= !!(mhdp
->link
.capabilities
&
1387 DP_LINK_CAP_ENHANCED_FRAMING
);
1389 /* Set SSC support */
1390 mhdp
->sink
.ssc
= !!(dpcd
[DP_MAX_DOWNSPREAD
] &
1391 DP_MAX_DOWNSPREAD_0_5
);
1393 /* Set TPS support */
1394 mhdp
->sink
.pattern_supp
= CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
1395 if (drm_dp_tps3_supported(dpcd
))
1396 mhdp
->sink
.pattern_supp
|= CDNS_SUPPORT_TPS(3);
1397 if (drm_dp_tps4_supported(dpcd
))
1398 mhdp
->sink
.pattern_supp
|= CDNS_SUPPORT_TPS(4);
1400 /* Set fast link support */
1401 mhdp
->sink
.fast_link
= !!(dpcd
[DP_MAX_DOWNSPREAD
] &
1402 DP_NO_AUX_HANDSHAKE_LINK_TRAINING
);
1405 static int cdns_mhdp_link_up(struct cdns_mhdp_device
*mhdp
)
1407 u8 dpcd
[DP_RECEIVER_CAP_SIZE
], amp
[2];
1408 u32 resp
, interval
, interval_us
;
1413 WARN_ON(!mutex_is_locked(&mhdp
->link_mutex
));
1415 drm_dp_dpcd_readb(&mhdp
->aux
, DP_TRAINING_AUX_RD_INTERVAL
,
1418 if (ext_cap_chk
& DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT
)
1419 addr
= DP_DP13_DPCD_REV
;
1423 err
= drm_dp_dpcd_read(&mhdp
->aux
, addr
, dpcd
, DP_RECEIVER_CAP_SIZE
);
1425 dev_err(mhdp
->dev
, "Failed to read receiver capabilities\n");
1429 mhdp
->link
.revision
= dpcd
[0];
1430 mhdp
->link
.rate
= drm_dp_bw_code_to_link_rate(dpcd
[1]);
1431 mhdp
->link
.num_lanes
= dpcd
[2] & DP_MAX_LANE_COUNT_MASK
;
1433 if (dpcd
[2] & DP_ENHANCED_FRAME_CAP
)
1434 mhdp
->link
.capabilities
|= DP_LINK_CAP_ENHANCED_FRAMING
;
1436 dev_dbg(mhdp
->dev
, "Set sink device power state via DPCD\n");
1437 cdns_mhdp_link_power_up(&mhdp
->aux
, &mhdp
->link
);
1439 cdns_mhdp_fill_sink_caps(mhdp
, dpcd
);
1441 mhdp
->link
.rate
= cdns_mhdp_max_link_rate(mhdp
);
1442 mhdp
->link
.num_lanes
= cdns_mhdp_max_num_lanes(mhdp
);
1444 /* Disable framer for link training */
1445 err
= cdns_mhdp_reg_read(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, &resp
);
1448 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1453 resp
&= ~CDNS_DP_FRAMER_EN
;
1454 cdns_mhdp_reg_write(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, resp
);
1456 /* Spread AMP if required, enable 8b/10b coding */
1457 amp
[0] = cdns_mhdp_get_ssc_supported(mhdp
) ? DP_SPREAD_AMP_0_5
: 0;
1458 amp
[1] = DP_SET_ANSI_8B10B
;
1459 drm_dp_dpcd_write(&mhdp
->aux
, DP_DOWNSPREAD_CTRL
, amp
, 2);
1461 if (mhdp
->host
.fast_link
& mhdp
->sink
.fast_link
) {
1462 dev_err(mhdp
->dev
, "fastlink not supported\n");
1466 interval
= dpcd
[DP_TRAINING_AUX_RD_INTERVAL
] & DP_TRAINING_AUX_RD_MASK
;
1467 interval_us
= cdns_mhdp_get_training_interval_us(mhdp
, interval
);
1469 cdns_mhdp_link_training(mhdp
, interval_us
)) {
1470 dev_err(mhdp
->dev
, "Link training failed. Exiting.\n");
1474 mhdp
->link_up
= true;
1479 static void cdns_mhdp_link_down(struct cdns_mhdp_device
*mhdp
)
1481 WARN_ON(!mutex_is_locked(&mhdp
->link_mutex
));
1484 cdns_mhdp_link_power_down(&mhdp
->aux
, &mhdp
->link
);
1486 mhdp
->link_up
= false;
1489 static struct edid
*cdns_mhdp_get_edid(struct cdns_mhdp_device
*mhdp
,
1490 struct drm_connector
*connector
)
1495 return drm_do_get_edid(connector
, cdns_mhdp_get_edid_block
, mhdp
);
1498 static int cdns_mhdp_get_modes(struct drm_connector
*connector
)
1500 struct cdns_mhdp_device
*mhdp
= connector_to_mhdp(connector
);
1507 edid
= cdns_mhdp_get_edid(mhdp
, connector
);
1509 dev_err(mhdp
->dev
, "Failed to read EDID\n");
1513 drm_connector_update_edid_property(connector
, edid
);
1514 num_modes
= drm_add_edid_modes(connector
, edid
);
1518 * HACK: Warn about unsupported display formats until we deal
1519 * with them correctly.
1521 if (connector
->display_info
.color_formats
&&
1522 !(connector
->display_info
.color_formats
&
1523 mhdp
->display_fmt
.color_format
))
1525 "%s: No supported color_format found (0x%08x)\n",
1526 __func__
, connector
->display_info
.color_formats
);
1528 if (connector
->display_info
.bpc
&&
1529 connector
->display_info
.bpc
< mhdp
->display_fmt
.bpc
)
1530 dev_warn(mhdp
->dev
, "%s: Display bpc only %d < %d\n",
1531 __func__
, connector
->display_info
.bpc
,
1532 mhdp
->display_fmt
.bpc
);
1537 static int cdns_mhdp_connector_detect(struct drm_connector
*conn
,
1538 struct drm_modeset_acquire_ctx
*ctx
,
1541 struct cdns_mhdp_device
*mhdp
= connector_to_mhdp(conn
);
1543 return cdns_mhdp_detect(mhdp
);
1546 static u32
cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt
*fmt
)
1553 switch (fmt
->color_format
) {
1554 case DRM_COLOR_FORMAT_RGB444
:
1555 case DRM_COLOR_FORMAT_YCRCB444
:
1558 case DRM_COLOR_FORMAT_YCRCB422
:
1561 case DRM_COLOR_FORMAT_YCRCB420
:
1562 bpp
= fmt
->bpc
* 3 / 2;
1572 bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device
*mhdp
,
1573 const struct drm_display_mode
*mode
,
1574 unsigned int lanes
, unsigned int rate
)
1576 u32 max_bw
, req_bw
, bpp
;
1579 * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
1580 * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the
1581 * value thus equals the bandwidth in 10kb/s units, which matches the
1582 * units of the rate parameter.
1585 bpp
= cdns_mhdp_get_bpp(&mhdp
->display_fmt
);
1586 req_bw
= mode
->clock
* bpp
/ 8;
1587 max_bw
= lanes
* rate
;
1588 if (req_bw
> max_bw
) {
1590 "Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
1591 mode
->name
, req_bw
, max_bw
);
1600 enum drm_mode_status
cdns_mhdp_mode_valid(struct drm_connector
*conn
,
1601 struct drm_display_mode
*mode
)
1603 struct cdns_mhdp_device
*mhdp
= connector_to_mhdp(conn
);
1605 mutex_lock(&mhdp
->link_mutex
);
1607 if (!cdns_mhdp_bandwidth_ok(mhdp
, mode
, mhdp
->link
.num_lanes
,
1609 mutex_unlock(&mhdp
->link_mutex
);
1610 return MODE_CLOCK_HIGH
;
1613 mutex_unlock(&mhdp
->link_mutex
);
1617 static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs
= {
1618 .detect_ctx
= cdns_mhdp_connector_detect
,
1619 .get_modes
= cdns_mhdp_get_modes
,
1620 .mode_valid
= cdns_mhdp_mode_valid
,
1623 static const struct drm_connector_funcs cdns_mhdp_conn_funcs
= {
1624 .fill_modes
= drm_helper_probe_single_connector_modes
,
1625 .atomic_duplicate_state
= drm_atomic_helper_connector_duplicate_state
,
1626 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
1627 .reset
= drm_atomic_helper_connector_reset
,
1628 .destroy
= drm_connector_cleanup
,
1631 static int cdns_mhdp_connector_init(struct cdns_mhdp_device
*mhdp
)
1633 u32 bus_format
= MEDIA_BUS_FMT_RGB121212_1X36
;
1634 struct drm_connector
*conn
= &mhdp
->connector
;
1635 struct drm_bridge
*bridge
= &mhdp
->bridge
;
1638 if (!bridge
->encoder
) {
1639 dev_err(mhdp
->dev
, "Parent encoder object not found");
1643 conn
->polled
= DRM_CONNECTOR_POLL_HPD
;
1645 ret
= drm_connector_init(bridge
->dev
, conn
, &cdns_mhdp_conn_funcs
,
1646 DRM_MODE_CONNECTOR_DisplayPort
);
1648 dev_err(mhdp
->dev
, "Failed to initialize connector with drm\n");
1652 drm_connector_helper_add(conn
, &cdns_mhdp_conn_helper_funcs
);
1654 ret
= drm_display_info_set_bus_formats(&conn
->display_info
,
1659 ret
= drm_connector_attach_encoder(conn
, bridge
->encoder
);
1661 dev_err(mhdp
->dev
, "Failed to attach connector to encoder\n");
1668 static int cdns_mhdp_attach(struct drm_bridge
*bridge
,
1669 enum drm_bridge_attach_flags flags
)
1671 struct cdns_mhdp_device
*mhdp
= bridge_to_mhdp(bridge
);
1675 dev_dbg(mhdp
->dev
, "%s\n", __func__
);
1677 if (!(flags
& DRM_BRIDGE_ATTACH_NO_CONNECTOR
)) {
1678 ret
= cdns_mhdp_connector_init(mhdp
);
1683 spin_lock(&mhdp
->start_lock
);
1685 mhdp
->bridge_attached
= true;
1686 hw_ready
= mhdp
->hw_state
== MHDP_HW_READY
;
1688 spin_unlock(&mhdp
->start_lock
);
1690 /* Enable SW event interrupts */
1692 writel(~(u32
)CDNS_APB_INT_MASK_SW_EVENT_INT
,
1693 mhdp
->regs
+ CDNS_APB_INT_MASK
);
1698 static void cdns_mhdp_configure_video(struct cdns_mhdp_device
*mhdp
,
1699 const struct drm_display_mode
*mode
)
1701 unsigned int dp_framer_sp
= 0, msa_horizontal_1
,
1702 msa_vertical_1
, bnd_hsync2vsync
, hsync2vsync_pol_ctrl
,
1703 misc0
= 0, misc1
= 0, pxl_repr
,
1704 front_porch
, back_porch
, msa_h0
, msa_v0
, hsync
, vsync
,
1706 u8 stream_id
= mhdp
->stream_id
;
1707 u32 bpp
, bpc
, pxlfmt
, framer
;
1710 pxlfmt
= mhdp
->display_fmt
.color_format
;
1711 bpc
= mhdp
->display_fmt
.bpc
;
1714 * If YCBCR supported and stream not SD, use ITU709
1715 * Need to handle ITU version with YCBCR420 when supported
1717 if ((pxlfmt
== DRM_COLOR_FORMAT_YCRCB444
||
1718 pxlfmt
== DRM_COLOR_FORMAT_YCRCB422
) && mode
->crtc_vdisplay
>= 720)
1719 misc0
= DP_YCBCR_COEFFICIENTS_ITU709
;
1721 bpp
= cdns_mhdp_get_bpp(&mhdp
->display_fmt
);
1724 case DRM_COLOR_FORMAT_RGB444
:
1725 pxl_repr
= CDNS_DP_FRAMER_RGB
<< CDNS_DP_FRAMER_PXL_FORMAT
;
1726 misc0
|= DP_COLOR_FORMAT_RGB
;
1728 case DRM_COLOR_FORMAT_YCRCB444
:
1729 pxl_repr
= CDNS_DP_FRAMER_YCBCR444
<< CDNS_DP_FRAMER_PXL_FORMAT
;
1730 misc0
|= DP_COLOR_FORMAT_YCbCr444
| DP_TEST_DYNAMIC_RANGE_CEA
;
1732 case DRM_COLOR_FORMAT_YCRCB422
:
1733 pxl_repr
= CDNS_DP_FRAMER_YCBCR422
<< CDNS_DP_FRAMER_PXL_FORMAT
;
1734 misc0
|= DP_COLOR_FORMAT_YCbCr422
| DP_TEST_DYNAMIC_RANGE_CEA
;
1736 case DRM_COLOR_FORMAT_YCRCB420
:
1737 pxl_repr
= CDNS_DP_FRAMER_YCBCR420
<< CDNS_DP_FRAMER_PXL_FORMAT
;
1740 pxl_repr
= CDNS_DP_FRAMER_Y_ONLY
<< CDNS_DP_FRAMER_PXL_FORMAT
;
1745 misc0
|= DP_TEST_BIT_DEPTH_6
;
1746 pxl_repr
|= CDNS_DP_FRAMER_6_BPC
;
1749 misc0
|= DP_TEST_BIT_DEPTH_8
;
1750 pxl_repr
|= CDNS_DP_FRAMER_8_BPC
;
1753 misc0
|= DP_TEST_BIT_DEPTH_10
;
1754 pxl_repr
|= CDNS_DP_FRAMER_10_BPC
;
1757 misc0
|= DP_TEST_BIT_DEPTH_12
;
1758 pxl_repr
|= CDNS_DP_FRAMER_12_BPC
;
1761 misc0
|= DP_TEST_BIT_DEPTH_16
;
1762 pxl_repr
|= CDNS_DP_FRAMER_16_BPC
;
1766 bnd_hsync2vsync
= CDNS_IP_BYPASS_V_INTERFACE
;
1767 if (mode
->flags
& DRM_MODE_FLAG_INTERLACE
)
1768 bnd_hsync2vsync
|= CDNS_IP_DET_INTERLACE_FORMAT
;
1770 cdns_mhdp_reg_write(mhdp
, CDNS_BND_HSYNC2VSYNC(stream_id
),
1773 hsync2vsync_pol_ctrl
= 0;
1774 if (mode
->flags
& DRM_MODE_FLAG_NHSYNC
)
1775 hsync2vsync_pol_ctrl
|= CDNS_H2V_HSYNC_POL_ACTIVE_LOW
;
1776 if (mode
->flags
& DRM_MODE_FLAG_NVSYNC
)
1777 hsync2vsync_pol_ctrl
|= CDNS_H2V_VSYNC_POL_ACTIVE_LOW
;
1778 cdns_mhdp_reg_write(mhdp
, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id
),
1779 hsync2vsync_pol_ctrl
);
1781 cdns_mhdp_reg_write(mhdp
, CDNS_DP_FRAMER_PXL_REPR(stream_id
), pxl_repr
);
1783 if (mode
->flags
& DRM_MODE_FLAG_INTERLACE
)
1784 dp_framer_sp
|= CDNS_DP_FRAMER_INTERLACE
;
1785 if (mode
->flags
& DRM_MODE_FLAG_NHSYNC
)
1786 dp_framer_sp
|= CDNS_DP_FRAMER_HSYNC_POL_LOW
;
1787 if (mode
->flags
& DRM_MODE_FLAG_NVSYNC
)
1788 dp_framer_sp
|= CDNS_DP_FRAMER_VSYNC_POL_LOW
;
1789 cdns_mhdp_reg_write(mhdp
, CDNS_DP_FRAMER_SP(stream_id
), dp_framer_sp
);
1791 front_porch
= mode
->crtc_hsync_start
- mode
->crtc_hdisplay
;
1792 back_porch
= mode
->crtc_htotal
- mode
->crtc_hsync_end
;
1793 cdns_mhdp_reg_write(mhdp
, CDNS_DP_FRONT_BACK_PORCH(stream_id
),
1794 CDNS_DP_FRONT_PORCH(front_porch
) |
1795 CDNS_DP_BACK_PORCH(back_porch
));
1797 cdns_mhdp_reg_write(mhdp
, CDNS_DP_BYTE_COUNT(stream_id
),
1798 mode
->crtc_hdisplay
* bpp
/ 8);
1800 msa_h0
= mode
->crtc_htotal
- mode
->crtc_hsync_start
;
1801 cdns_mhdp_reg_write(mhdp
, CDNS_DP_MSA_HORIZONTAL_0(stream_id
),
1802 CDNS_DP_MSAH0_H_TOTAL(mode
->crtc_htotal
) |
1803 CDNS_DP_MSAH0_HSYNC_START(msa_h0
));
1805 hsync
= mode
->crtc_hsync_end
- mode
->crtc_hsync_start
;
1806 msa_horizontal_1
= CDNS_DP_MSAH1_HSYNC_WIDTH(hsync
) |
1807 CDNS_DP_MSAH1_HDISP_WIDTH(mode
->crtc_hdisplay
);
1808 if (mode
->flags
& DRM_MODE_FLAG_NHSYNC
)
1809 msa_horizontal_1
|= CDNS_DP_MSAH1_HSYNC_POL_LOW
;
1810 cdns_mhdp_reg_write(mhdp
, CDNS_DP_MSA_HORIZONTAL_1(stream_id
),
1813 msa_v0
= mode
->crtc_vtotal
- mode
->crtc_vsync_start
;
1814 cdns_mhdp_reg_write(mhdp
, CDNS_DP_MSA_VERTICAL_0(stream_id
),
1815 CDNS_DP_MSAV0_V_TOTAL(mode
->crtc_vtotal
) |
1816 CDNS_DP_MSAV0_VSYNC_START(msa_v0
));
1818 vsync
= mode
->crtc_vsync_end
- mode
->crtc_vsync_start
;
1819 msa_vertical_1
= CDNS_DP_MSAV1_VSYNC_WIDTH(vsync
) |
1820 CDNS_DP_MSAV1_VDISP_WIDTH(mode
->crtc_vdisplay
);
1821 if (mode
->flags
& DRM_MODE_FLAG_NVSYNC
)
1822 msa_vertical_1
|= CDNS_DP_MSAV1_VSYNC_POL_LOW
;
1823 cdns_mhdp_reg_write(mhdp
, CDNS_DP_MSA_VERTICAL_1(stream_id
),
1826 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) &&
1827 mode
->crtc_vtotal
% 2 == 0)
1828 misc1
= DP_TEST_INTERLACED
;
1829 if (mhdp
->display_fmt
.y_only
)
1830 misc1
|= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY
;
1831 /* Use VSC SDP for Y420 */
1832 if (pxlfmt
== DRM_COLOR_FORMAT_YCRCB420
)
1833 misc1
= CDNS_DP_TEST_VSC_SDP
;
1835 cdns_mhdp_reg_write(mhdp
, CDNS_DP_MSA_MISC(stream_id
),
1836 misc0
| (misc1
<< 8));
1838 cdns_mhdp_reg_write(mhdp
, CDNS_DP_HORIZONTAL(stream_id
),
1839 CDNS_DP_H_HSYNC_WIDTH(hsync
) |
1840 CDNS_DP_H_H_TOTAL(mode
->crtc_hdisplay
));
1842 cdns_mhdp_reg_write(mhdp
, CDNS_DP_VERTICAL_0(stream_id
),
1843 CDNS_DP_V0_VHEIGHT(mode
->crtc_vdisplay
) |
1844 CDNS_DP_V0_VSTART(msa_v0
));
1846 dp_vertical_1
= CDNS_DP_V1_VTOTAL(mode
->crtc_vtotal
);
1847 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) &&
1848 mode
->crtc_vtotal
% 2 == 0)
1849 dp_vertical_1
|= CDNS_DP_V1_VTOTAL_EVEN
;
1851 cdns_mhdp_reg_write(mhdp
, CDNS_DP_VERTICAL_1(stream_id
), dp_vertical_1
);
1853 cdns_mhdp_reg_write_bit(mhdp
, CDNS_DP_VB_ID(stream_id
), 2, 1,
1854 (mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ?
1855 CDNS_DP_VB_ID_INTERLACED
: 0);
1857 ret
= cdns_mhdp_reg_read(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, &framer
);
1860 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1864 framer
|= CDNS_DP_FRAMER_EN
;
1865 framer
&= ~CDNS_DP_NO_VIDEO_MODE
;
1866 cdns_mhdp_reg_write(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, framer
);
1869 static void cdns_mhdp_sst_enable(struct cdns_mhdp_device
*mhdp
,
1870 const struct drm_display_mode
*mode
)
1872 u32 rate
, vs
, required_bandwidth
, available_bandwidth
;
1873 s32 line_thresh1
, line_thresh2
, line_thresh
= 0;
1874 int pxlclock
= mode
->crtc_clock
;
1878 /* Get rate in MSymbols per second per lane */
1879 rate
= mhdp
->link
.rate
/ 1000;
1881 bpp
= cdns_mhdp_get_bpp(&mhdp
->display_fmt
);
1883 required_bandwidth
= pxlclock
* bpp
/ 8;
1884 available_bandwidth
= mhdp
->link
.num_lanes
* rate
;
1886 vs
= tu_size
* required_bandwidth
/ available_bandwidth
;
1892 line_thresh1
= ((vs
+ 1) << 5) * 8 / bpp
;
1893 line_thresh2
= (pxlclock
<< 5) / 1000 / rate
* (vs
+ 1) - (1 << 5);
1894 line_thresh
= line_thresh1
- line_thresh2
/ (s32
)mhdp
->link
.num_lanes
;
1895 line_thresh
= (line_thresh
>> 5) + 2;
1897 mhdp
->stream_id
= 0;
1899 cdns_mhdp_reg_write(mhdp
, CDNS_DP_FRAMER_TU
,
1900 CDNS_DP_FRAMER_TU_VS(vs
) |
1901 CDNS_DP_FRAMER_TU_SIZE(tu_size
) |
1902 CDNS_DP_FRAMER_TU_CNT_RST_EN
);
1904 cdns_mhdp_reg_write(mhdp
, CDNS_DP_LINE_THRESH(0),
1905 line_thresh
& GENMASK(5, 0));
1907 cdns_mhdp_reg_write(mhdp
, CDNS_DP_STREAM_CONFIG_2(0),
1908 CDNS_DP_SC2_TU_VS_DIFF((tu_size
- vs
> 3) ?
1911 cdns_mhdp_configure_video(mhdp
, mode
);
1914 static void cdns_mhdp_atomic_enable(struct drm_bridge
*bridge
,
1915 struct drm_bridge_state
*bridge_state
)
1917 struct cdns_mhdp_device
*mhdp
= bridge_to_mhdp(bridge
);
1918 struct drm_atomic_state
*state
= bridge_state
->base
.state
;
1919 struct cdns_mhdp_bridge_state
*mhdp_state
;
1920 struct drm_crtc_state
*crtc_state
;
1921 struct drm_connector
*connector
;
1922 struct drm_connector_state
*conn_state
;
1923 struct drm_bridge_state
*new_state
;
1924 const struct drm_display_mode
*mode
;
1928 dev_dbg(mhdp
->dev
, "bridge enable\n");
1930 mutex_lock(&mhdp
->link_mutex
);
1932 if (mhdp
->plugged
&& !mhdp
->link_up
) {
1933 ret
= cdns_mhdp_link_up(mhdp
);
1938 if (mhdp
->info
&& mhdp
->info
->ops
&& mhdp
->info
->ops
->enable
)
1939 mhdp
->info
->ops
->enable(mhdp
);
1941 /* Enable VIF clock for stream 0 */
1942 ret
= cdns_mhdp_reg_read(mhdp
, CDNS_DPTX_CAR
, &resp
);
1944 dev_err(mhdp
->dev
, "Failed to read CDNS_DPTX_CAR %d\n", ret
);
1948 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_CAR
,
1949 resp
| CDNS_VIF_CLK_EN
| CDNS_VIF_CLK_RSTN
);
1951 connector
= drm_atomic_get_new_connector_for_encoder(state
,
1953 if (WARN_ON(!connector
))
1956 conn_state
= drm_atomic_get_new_connector_state(state
, connector
);
1957 if (WARN_ON(!conn_state
))
1960 crtc_state
= drm_atomic_get_new_crtc_state(state
, conn_state
->crtc
);
1961 if (WARN_ON(!crtc_state
))
1964 mode
= &crtc_state
->adjusted_mode
;
1966 new_state
= drm_atomic_get_new_bridge_state(state
, bridge
);
1967 if (WARN_ON(!new_state
))
1970 if (!cdns_mhdp_bandwidth_ok(mhdp
, mode
, mhdp
->link
.num_lanes
,
1976 cdns_mhdp_sst_enable(mhdp
, mode
);
1978 mhdp_state
= to_cdns_mhdp_bridge_state(new_state
);
1980 mhdp_state
->current_mode
= drm_mode_duplicate(bridge
->dev
, mode
);
1981 drm_mode_set_name(mhdp_state
->current_mode
);
1983 dev_dbg(mhdp
->dev
, "%s: Enabling mode %s\n", __func__
, mode
->name
);
1985 mhdp
->bridge_enabled
= true;
1988 mutex_unlock(&mhdp
->link_mutex
);
1990 schedule_work(&mhdp
->modeset_retry_work
);
1993 static void cdns_mhdp_atomic_disable(struct drm_bridge
*bridge
,
1994 struct drm_bridge_state
*bridge_state
)
1996 struct cdns_mhdp_device
*mhdp
= bridge_to_mhdp(bridge
);
1999 dev_dbg(mhdp
->dev
, "%s\n", __func__
);
2001 mutex_lock(&mhdp
->link_mutex
);
2003 mhdp
->bridge_enabled
= false;
2004 cdns_mhdp_reg_read(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, &resp
);
2005 resp
&= ~CDNS_DP_FRAMER_EN
;
2006 resp
|= CDNS_DP_NO_VIDEO_MODE
;
2007 cdns_mhdp_reg_write(mhdp
, CDNS_DP_FRAMER_GLOBAL_CONFIG
, resp
);
2009 cdns_mhdp_link_down(mhdp
);
2011 /* Disable VIF clock for stream 0 */
2012 cdns_mhdp_reg_read(mhdp
, CDNS_DPTX_CAR
, &resp
);
2013 cdns_mhdp_reg_write(mhdp
, CDNS_DPTX_CAR
,
2014 resp
& ~(CDNS_VIF_CLK_EN
| CDNS_VIF_CLK_RSTN
));
2016 if (mhdp
->info
&& mhdp
->info
->ops
&& mhdp
->info
->ops
->disable
)
2017 mhdp
->info
->ops
->disable(mhdp
);
2019 mutex_unlock(&mhdp
->link_mutex
);
2022 static void cdns_mhdp_detach(struct drm_bridge
*bridge
)
2024 struct cdns_mhdp_device
*mhdp
= bridge_to_mhdp(bridge
);
2026 dev_dbg(mhdp
->dev
, "%s\n", __func__
);
2028 spin_lock(&mhdp
->start_lock
);
2030 mhdp
->bridge_attached
= false;
2032 spin_unlock(&mhdp
->start_lock
);
2034 writel(~0, mhdp
->regs
+ CDNS_APB_INT_MASK
);
2037 static struct drm_bridge_state
*
2038 cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge
*bridge
)
2040 struct cdns_mhdp_bridge_state
*state
;
2042 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2046 __drm_atomic_helper_bridge_duplicate_state(bridge
, &state
->base
);
2048 return &state
->base
;
2052 cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge
*bridge
,
2053 struct drm_bridge_state
*state
)
2055 struct cdns_mhdp_bridge_state
*cdns_mhdp_state
;
2057 cdns_mhdp_state
= to_cdns_mhdp_bridge_state(state
);
2059 if (cdns_mhdp_state
->current_mode
) {
2060 drm_mode_destroy(bridge
->dev
, cdns_mhdp_state
->current_mode
);
2061 cdns_mhdp_state
->current_mode
= NULL
;
2064 kfree(cdns_mhdp_state
);
2067 static struct drm_bridge_state
*
2068 cdns_mhdp_bridge_atomic_reset(struct drm_bridge
*bridge
)
2070 struct cdns_mhdp_bridge_state
*cdns_mhdp_state
;
2072 cdns_mhdp_state
= kzalloc(sizeof(*cdns_mhdp_state
), GFP_KERNEL
);
2073 if (!cdns_mhdp_state
)
2076 __drm_atomic_helper_bridge_reset(bridge
, &cdns_mhdp_state
->base
);
2078 return &cdns_mhdp_state
->base
;
2081 static int cdns_mhdp_atomic_check(struct drm_bridge
*bridge
,
2082 struct drm_bridge_state
*bridge_state
,
2083 struct drm_crtc_state
*crtc_state
,
2084 struct drm_connector_state
*conn_state
)
2086 struct cdns_mhdp_device
*mhdp
= bridge_to_mhdp(bridge
);
2087 const struct drm_display_mode
*mode
= &crtc_state
->adjusted_mode
;
2089 mutex_lock(&mhdp
->link_mutex
);
2091 if (!cdns_mhdp_bandwidth_ok(mhdp
, mode
, mhdp
->link
.num_lanes
,
2093 dev_err(mhdp
->dev
, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
2094 __func__
, mode
->name
, mhdp
->link
.num_lanes
,
2095 mhdp
->link
.rate
/ 100);
2096 mutex_unlock(&mhdp
->link_mutex
);
2100 mutex_unlock(&mhdp
->link_mutex
);
2104 static enum drm_connector_status
cdns_mhdp_bridge_detect(struct drm_bridge
*bridge
)
2106 struct cdns_mhdp_device
*mhdp
= bridge_to_mhdp(bridge
);
2108 return cdns_mhdp_detect(mhdp
);
/* drm_bridge_funcs.get_edid: thin wrapper around cdns_mhdp_get_edid(). */
static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
					      struct drm_connector *connector)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	return cdns_mhdp_get_edid(mhdp, connector);
}
2119 static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge
*bridge
)
2121 struct cdns_mhdp_device
*mhdp
= bridge_to_mhdp(bridge
);
2123 /* Enable SW event interrupts */
2124 if (mhdp
->bridge_attached
)
2125 writel(~(u32
)CDNS_APB_INT_MASK_SW_EVENT_INT
,
2126 mhdp
->regs
+ CDNS_APB_INT_MASK
);
2129 static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge
*bridge
)
2131 struct cdns_mhdp_device
*mhdp
= bridge_to_mhdp(bridge
);
2133 writel(CDNS_APB_INT_MASK_SW_EVENT_INT
, mhdp
->regs
+ CDNS_APB_INT_MASK
);
2136 static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs
= {
2137 .atomic_enable
= cdns_mhdp_atomic_enable
,
2138 .atomic_disable
= cdns_mhdp_atomic_disable
,
2139 .atomic_check
= cdns_mhdp_atomic_check
,
2140 .attach
= cdns_mhdp_attach
,
2141 .detach
= cdns_mhdp_detach
,
2142 .atomic_duplicate_state
= cdns_mhdp_bridge_atomic_duplicate_state
,
2143 .atomic_destroy_state
= cdns_mhdp_bridge_atomic_destroy_state
,
2144 .atomic_reset
= cdns_mhdp_bridge_atomic_reset
,
2145 .detect
= cdns_mhdp_bridge_detect
,
2146 .get_edid
= cdns_mhdp_bridge_get_edid
,
2147 .hpd_enable
= cdns_mhdp_bridge_hpd_enable
,
2148 .hpd_disable
= cdns_mhdp_bridge_hpd_disable
,
2151 static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device
*mhdp
, bool *hpd_pulse
)
2153 int hpd_event
, hpd_status
;
2157 hpd_event
= cdns_mhdp_read_hpd_event(mhdp
);
2159 /* Getting event bits failed, bail out */
2160 if (hpd_event
< 0) {
2161 dev_warn(mhdp
->dev
, "%s: read event failed: %d\n",
2162 __func__
, hpd_event
);
2166 hpd_status
= cdns_mhdp_get_hpd_status(mhdp
);
2167 if (hpd_status
< 0) {
2168 dev_warn(mhdp
->dev
, "%s: get hpd status failed: %d\n",
2169 __func__
, hpd_status
);
2173 if (hpd_event
& DPTX_READ_EVENT_HPD_PULSE
)
2176 return !!hpd_status
;
2179 static int cdns_mhdp_update_link_status(struct cdns_mhdp_device
*mhdp
)
2181 struct cdns_mhdp_bridge_state
*cdns_bridge_state
;
2182 struct drm_display_mode
*current_mode
;
2183 bool old_plugged
= mhdp
->plugged
;
2184 struct drm_bridge_state
*state
;
2185 u8 status
[DP_LINK_STATUS_SIZE
];
2189 mutex_lock(&mhdp
->link_mutex
);
2191 mhdp
->plugged
= cdns_mhdp_detect_hpd(mhdp
, &hpd_pulse
);
2193 if (!mhdp
->plugged
) {
2194 cdns_mhdp_link_down(mhdp
);
2195 mhdp
->link
.rate
= mhdp
->host
.link_rate
;
2196 mhdp
->link
.num_lanes
= mhdp
->host
.lanes_cnt
;
2201 * If we get a HPD pulse event and we were and still are connected,
2202 * check the link status. If link status is ok, there's nothing to do
2203 * as we don't handle DP interrupts. If link status is bad, continue
2204 * with full link setup.
2206 if (hpd_pulse
&& old_plugged
== mhdp
->plugged
) {
2207 ret
= drm_dp_dpcd_read_link_status(&mhdp
->aux
, status
);
2210 * If everything looks fine, just return, as we don't handle
2214 drm_dp_channel_eq_ok(status
, mhdp
->link
.num_lanes
) &&
2215 drm_dp_clock_recovery_ok(status
, mhdp
->link
.num_lanes
))
2218 /* If link is bad, mark link as down so that we do a new LT */
2219 mhdp
->link_up
= false;
2222 if (!mhdp
->link_up
) {
2223 ret
= cdns_mhdp_link_up(mhdp
);
2228 if (mhdp
->bridge_enabled
) {
2229 state
= drm_priv_to_bridge_state(mhdp
->bridge
.base
.state
);
2235 cdns_bridge_state
= to_cdns_mhdp_bridge_state(state
);
2236 if (!cdns_bridge_state
) {
2241 current_mode
= cdns_bridge_state
->current_mode
;
2242 if (!current_mode
) {
2247 if (!cdns_mhdp_bandwidth_ok(mhdp
, current_mode
, mhdp
->link
.num_lanes
,
2253 dev_dbg(mhdp
->dev
, "%s: Enabling mode %s\n", __func__
,
2254 current_mode
->name
);
2256 cdns_mhdp_sst_enable(mhdp
, current_mode
);
2259 mutex_unlock(&mhdp
->link_mutex
);
2263 static void cdns_mhdp_modeset_retry_fn(struct work_struct
*work
)
2265 struct cdns_mhdp_device
*mhdp
;
2266 struct drm_connector
*conn
;
2268 mhdp
= container_of(work
, typeof(*mhdp
), modeset_retry_work
);
2270 conn
= &mhdp
->connector
;
2272 /* Grab the locks before changing connector property */
2273 mutex_lock(&conn
->dev
->mode_config
.mutex
);
2276 * Set connector link status to BAD and send a Uevent to notify
2277 * userspace to do a modeset.
2279 drm_connector_set_link_status_property(conn
, DRM_MODE_LINK_STATUS_BAD
);
2280 mutex_unlock(&conn
->dev
->mode_config
.mutex
);
2282 /* Send Hotplug uevent so userspace can reprobe */
2283 drm_kms_helper_hotplug_event(mhdp
->bridge
.dev
);
2286 static irqreturn_t
cdns_mhdp_irq_handler(int irq
, void *data
)
2288 struct cdns_mhdp_device
*mhdp
= data
;
2289 u32 apb_stat
, sw_ev0
;
2290 bool bridge_attached
;
2293 apb_stat
= readl(mhdp
->regs
+ CDNS_APB_INT_STATUS
);
2294 if (!(apb_stat
& CDNS_APB_INT_MASK_SW_EVENT_INT
))
2297 sw_ev0
= readl(mhdp
->regs
+ CDNS_SW_EVENT0
);
2300 * Calling drm_kms_helper_hotplug_event() when not attached
2301 * to drm device causes an oops because the drm_bridge->dev
2302 * is NULL. See cdns_mhdp_fw_cb() comments for details about the
2303 * problems related drm_kms_helper_hotplug_event() call.
2305 spin_lock(&mhdp
->start_lock
);
2306 bridge_attached
= mhdp
->bridge_attached
;
2307 spin_unlock(&mhdp
->start_lock
);
2309 if (bridge_attached
&& (sw_ev0
& CDNS_DPTX_HPD
)) {
2310 ret
= cdns_mhdp_update_link_status(mhdp
);
2311 if (mhdp
->connector
.dev
) {
2313 schedule_work(&mhdp
->modeset_retry_work
);
2315 drm_kms_helper_hotplug_event(mhdp
->bridge
.dev
);
2317 drm_bridge_hpd_notify(&mhdp
->bridge
, cdns_mhdp_detect(mhdp
));
2324 static int cdns_mhdp_probe(struct platform_device
*pdev
)
2326 struct device
*dev
= &pdev
->dev
;
2327 struct cdns_mhdp_device
*mhdp
;
2333 mhdp
= devm_kzalloc(dev
, sizeof(*mhdp
), GFP_KERNEL
);
2337 clk
= devm_clk_get(dev
, NULL
);
2339 dev_err(dev
, "couldn't get clk: %ld\n", PTR_ERR(clk
));
2340 return PTR_ERR(clk
);
2345 mutex_init(&mhdp
->mbox_mutex
);
2346 mutex_init(&mhdp
->link_mutex
);
2347 spin_lock_init(&mhdp
->start_lock
);
2349 drm_dp_aux_init(&mhdp
->aux
);
2350 mhdp
->aux
.dev
= dev
;
2351 mhdp
->aux
.transfer
= cdns_mhdp_transfer
;
2353 mhdp
->regs
= devm_platform_ioremap_resource(pdev
, 0);
2354 if (IS_ERR(mhdp
->regs
)) {
2355 dev_err(dev
, "Failed to get memory resource\n");
2356 return PTR_ERR(mhdp
->regs
);
2359 mhdp
->phy
= devm_of_phy_get_by_index(dev
, pdev
->dev
.of_node
, 0);
2360 if (IS_ERR(mhdp
->phy
)) {
2361 dev_err(dev
, "no PHY configured\n");
2362 return PTR_ERR(mhdp
->phy
);
2365 platform_set_drvdata(pdev
, mhdp
);
2367 mhdp
->info
= of_device_get_match_data(dev
);
2369 clk_prepare_enable(clk
);
2371 pm_runtime_enable(dev
);
2372 ret
= pm_runtime_get_sync(dev
);
2374 dev_err(dev
, "pm_runtime_get_sync failed\n");
2375 pm_runtime_disable(dev
);
2379 if (mhdp
->info
&& mhdp
->info
->ops
&& mhdp
->info
->ops
->init
) {
2380 ret
= mhdp
->info
->ops
->init(mhdp
);
2382 dev_err(dev
, "MHDP platform initialization failed: %d\n",
2388 rate
= clk_get_rate(clk
);
2389 writel(rate
% 1000000, mhdp
->regs
+ CDNS_SW_CLK_L
);
2390 writel(rate
/ 1000000, mhdp
->regs
+ CDNS_SW_CLK_H
);
2392 dev_dbg(dev
, "func clk rate %lu Hz\n", rate
);
2394 writel(~0, mhdp
->regs
+ CDNS_APB_INT_MASK
);
2396 irq
= platform_get_irq(pdev
, 0);
2397 ret
= devm_request_threaded_irq(mhdp
->dev
, irq
, NULL
,
2398 cdns_mhdp_irq_handler
, IRQF_ONESHOT
,
2401 dev_err(dev
, "cannot install IRQ %d\n", irq
);
2406 cdns_mhdp_fill_host_caps(mhdp
);
2408 /* Initialize link rate and num of lanes to host values */
2409 mhdp
->link
.rate
= mhdp
->host
.link_rate
;
2410 mhdp
->link
.num_lanes
= mhdp
->host
.lanes_cnt
;
2412 /* The only currently supported format */
2413 mhdp
->display_fmt
.y_only
= false;
2414 mhdp
->display_fmt
.color_format
= DRM_COLOR_FORMAT_RGB444
;
2415 mhdp
->display_fmt
.bpc
= 8;
2417 mhdp
->bridge
.of_node
= pdev
->dev
.of_node
;
2418 mhdp
->bridge
.funcs
= &cdns_mhdp_bridge_funcs
;
2419 mhdp
->bridge
.ops
= DRM_BRIDGE_OP_DETECT
| DRM_BRIDGE_OP_EDID
|
2421 mhdp
->bridge
.type
= DRM_MODE_CONNECTOR_DisplayPort
;
2423 mhdp
->bridge
.timings
= mhdp
->info
->timings
;
2425 ret
= phy_init(mhdp
->phy
);
2427 dev_err(mhdp
->dev
, "Failed to initialize PHY: %d\n", ret
);
2431 /* Initialize the work for modeset in case of link train failure */
2432 INIT_WORK(&mhdp
->modeset_retry_work
, cdns_mhdp_modeset_retry_fn
);
2434 init_waitqueue_head(&mhdp
->fw_load_wq
);
2436 ret
= cdns_mhdp_load_firmware(mhdp
);
2440 drm_bridge_add(&mhdp
->bridge
);
2445 phy_exit(mhdp
->phy
);
2447 if (mhdp
->info
&& mhdp
->info
->ops
&& mhdp
->info
->ops
->exit
)
2448 mhdp
->info
->ops
->exit(mhdp
);
2450 pm_runtime_put_sync(dev
);
2451 pm_runtime_disable(dev
);
2453 clk_disable_unprepare(mhdp
->clk
);
2458 static int cdns_mhdp_remove(struct platform_device
*pdev
)
2460 struct cdns_mhdp_device
*mhdp
= dev_get_drvdata(&pdev
->dev
);
2461 unsigned long timeout
= msecs_to_jiffies(100);
2462 bool stop_fw
= false;
2465 drm_bridge_remove(&mhdp
->bridge
);
2467 ret
= wait_event_timeout(mhdp
->fw_load_wq
,
2468 mhdp
->hw_state
== MHDP_HW_READY
,
2471 dev_err(mhdp
->dev
, "%s: Timeout waiting for fw loading\n",
2476 spin_lock(&mhdp
->start_lock
);
2477 mhdp
->hw_state
= MHDP_HW_STOPPED
;
2478 spin_unlock(&mhdp
->start_lock
);
2481 ret
= cdns_mhdp_set_firmware_active(mhdp
, false);
2483 phy_exit(mhdp
->phy
);
2485 if (mhdp
->info
&& mhdp
->info
->ops
&& mhdp
->info
->ops
->exit
)
2486 mhdp
->info
->ops
->exit(mhdp
);
2488 pm_runtime_put_sync(&pdev
->dev
);
2489 pm_runtime_disable(&pdev
->dev
);
2491 cancel_work_sync(&mhdp
->modeset_retry_work
);
2492 flush_scheduled_work();
2494 clk_disable_unprepare(mhdp
->clk
);
2499 static const struct of_device_id mhdp_ids
[] = {
2500 { .compatible
= "cdns,mhdp8546", },
2501 #ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
2502 { .compatible
= "ti,j721e-mhdp8546",
2503 .data
= &(const struct cdns_mhdp_platform_info
) {
2504 .timings
= &mhdp_ti_j721e_bridge_timings
,
2505 .ops
= &mhdp_ti_j721e_ops
,
2511 MODULE_DEVICE_TABLE(of
, mhdp_ids
);
2513 static struct platform_driver mhdp_driver
= {
2515 .name
= "cdns-mhdp8546",
2516 .of_match_table
= of_match_ptr(mhdp_ids
),
2518 .probe
= cdns_mhdp_probe
,
2519 .remove
= cdns_mhdp_remove
,
2521 module_platform_driver(mhdp_driver
);
/* Firmware image required by the DP controller; declared for autoloading. */
MODULE_FIRMWARE(FW_NAME);

MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-mhdp8546");