// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"

static const char *tc_port_mode_name(enum tc_port_mode mode)
{
        static const char * const names[] = {
                [TC_PORT_TBT_ALT] = "tbt-alt",
                [TC_PORT_DP_ALT] = "dp-alt",
                [TC_PORT_LEGACY] = "legacy",
        };

        if (WARN_ON(mode >= ARRAY_SIZE(names)))
                mode = TC_PORT_TBT_ALT;

        return names[mode];
}

static void
tc_port_load_fia_params(struct drm_i915_private *i915,
                        struct intel_digital_port *dig_port)
{
        enum port port = dig_port->base.port;
        enum tc_port tc_port = intel_port_to_tc(i915, port);
        u32 modular_fia;

        if (INTEL_INFO(i915)->display.has_modular_fia) {
                modular_fia = intel_uncore_read(&i915->uncore,
                                                PORT_TX_DFLEXDPSP(FIA1));
                drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
                modular_fia &= MODULAR_FIA_MASK;
        } else {
                modular_fia = 0;
        }

        /*
         * Each Modular FIA instance houses 2 TC ports. In SoCs that have more
         * than two TC ports, there are multiple instances of Modular FIA.
         */
        if (modular_fia) {
                dig_port->tc_phy_fia = tc_port / 2;
                dig_port->tc_phy_fia_idx = tc_port % 2;
        } else {
                dig_port->tc_phy_fia = FIA1;
                dig_port->tc_phy_fia_idx = tc_port;
        }
}

static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

        if (INTEL_GEN(i915) == 11)
                return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
        else
                return POWER_DOMAIN_TC_COLD_OFF;
}

static intel_wakeref_t
tc_cold_block(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        enum intel_display_power_domain domain;

        if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
                return 0;

        domain = tc_cold_get_power_domain(dig_port);
        return intel_display_power_get(i915, domain);
}

static void
tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        enum intel_display_power_domain domain;

        /*
         * wakeref == -1 means some error happened while saving
         * save_depot_stack, but power should still be put down; 0 is an
         * invalid save_depot_stack id, so it can be used to skip the put
         * for non-legacy TC ports.
         */
        if (!wakeref)
                return;

        domain = tc_cold_get_power_domain(dig_port);
        intel_display_power_put_async(i915, domain, wakeref);
}

static void
assert_tc_cold_blocked(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        bool enabled;

        if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
                return;

        enabled = intel_display_power_is_enabled(i915,
                                                 tc_cold_get_power_domain(dig_port));
        drm_WARN_ON(&i915->drm, !enabled);
}

u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 lane_mask;

        lane_mask = intel_uncore_read(uncore,
                                      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

        drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
        assert_tc_cold_blocked(dig_port);

        lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
        return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 pin_mask;

        pin_mask = intel_uncore_read(uncore,
                                     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));

        drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
        assert_tc_cold_blocked(dig_port);

        return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
               DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        intel_wakeref_t wakeref;
        u32 lane_mask;

        if (dig_port->tc_mode != TC_PORT_DP_ALT)
                return 4;

        assert_tc_cold_blocked(dig_port);

        lane_mask = 0;
        with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
                lane_mask = intel_tc_port_get_lane_mask(dig_port);

        switch (lane_mask) {
        default:
                MISSING_CASE(lane_mask);
                fallthrough;
        case 0x1:
        case 0x2:
        case 0x4:
        case 0x8:
                return 1;
        case 0x3:
        case 0xc:
                return 2;
        case 0xf:
                return 4;
        }
}

void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
                                      int required_lanes)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
        struct intel_uncore *uncore = &i915->uncore;
        u32 val;

        drm_WARN_ON(&i915->drm,
                    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

        assert_tc_cold_blocked(dig_port);

        val = intel_uncore_read(uncore,
                                PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
        val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);

        switch (required_lanes) {
        case 1:
                val |= lane_reversal ?
                        DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
                        DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
                break;
        case 2:
                val |= lane_reversal ?
                        DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
                        DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
                break;
        case 4:
                val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
                break;
        default:
                MISSING_CASE(required_lanes);
        }

        intel_uncore_write(uncore,
                           PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}

static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
                                      u32 live_status_mask)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        u32 valid_hpd_mask;

        if (dig_port->tc_legacy_port)
                valid_hpd_mask = BIT(TC_PORT_LEGACY);
        else
                valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
                                 BIT(TC_PORT_TBT_ALT);

        if (!(live_status_mask & ~valid_hpd_mask))
                return;

        /* If live status mismatches the VBT flag, trust the live status. */
        drm_dbg_kms(&i915->drm,
                    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
                    dig_port->tc_port_name, live_status_mask, valid_hpd_mask);

        dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}

static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
        u32 mask = 0;
        u32 val;

        val = intel_uncore_read(uncore,
                                PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

        if (val == 0xffffffff) {
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY in TCCOLD, nothing connected\n",
                            dig_port->tc_port_name);
                return mask;
        }

        if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
                mask |= BIT(TC_PORT_TBT_ALT);
        if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
                mask |= BIT(TC_PORT_DP_ALT);

        if (intel_uncore_read(uncore, SDEISR) & isr_bit)
                mask |= BIT(TC_PORT_LEGACY);

        /* The sink can be connected only in a single mode. */
        if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
                tc_port_fixup_legacy_flag(dig_port, mask);

        return mask;
}

static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 val;

        val = intel_uncore_read(uncore,
                                PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
        if (val == 0xffffffff) {
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY in TCCOLD, assuming not complete\n",
                            dig_port->tc_port_name);
                return false;
        }

        return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}

static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
                                     bool enable)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 val;

        val = intel_uncore_read(uncore,
                                PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
        if (val == 0xffffffff) {
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
                            dig_port->tc_port_name, enableddisabled(enable));
                return false;
        }

        if (enable)
                val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
        else
                val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);

        intel_uncore_write(uncore,
                           PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);

        if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY complete clear timed out\n",
                            dig_port->tc_port_name);

        return true;
}

static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 val;

        val = intel_uncore_read(uncore,
                                PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
        if (val == 0xffffffff) {
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY in TCCOLD, assume safe mode\n",
                            dig_port->tc_port_name);
                return true;
        }

        return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
}

/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 */
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
                               int required_lanes)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        int max_lanes;

        if (!icl_tc_phy_status_complete(dig_port)) {
                drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
                            dig_port->tc_port_name);
                goto out_set_tbt_alt_mode;
        }

        if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
            !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
                goto out_set_tbt_alt_mode;

        max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
        if (dig_port->tc_legacy_port) {
                drm_WARN_ON(&i915->drm, max_lanes != 4);
                dig_port->tc_mode = TC_PORT_LEGACY;

                return;
        }

        /*
         * Now we have to re-check the live state, in case the port recently
         * became disconnected. Not necessary for legacy mode.
         */
        if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
                drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
                            dig_port->tc_port_name);
                goto out_set_safe_mode;
        }

        if (max_lanes < required_lanes) {
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY max lanes %d < required lanes %d\n",
                            dig_port->tc_port_name,
                            max_lanes, required_lanes);
                goto out_set_safe_mode;
        }

        dig_port->tc_mode = TC_PORT_DP_ALT;

        return;

out_set_safe_mode:
        icl_tc_phy_set_safe_mode(dig_port, true);
out_set_tbt_alt_mode:
        dig_port->tc_mode = TC_PORT_TBT_ALT;
}

/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
        switch (dig_port->tc_mode) {
        case TC_PORT_LEGACY:
                /* Nothing to do, we never disconnect from legacy mode */
                break;
        case TC_PORT_DP_ALT:
                icl_tc_phy_set_safe_mode(dig_port, true);
                dig_port->tc_mode = TC_PORT_TBT_ALT;
                break;
        case TC_PORT_TBT_ALT:
                /* Nothing to do, we stay in TBT-alt mode */
                break;
        default:
                MISSING_CASE(dig_port->tc_mode);
        }
}

static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

        if (!icl_tc_phy_status_complete(dig_port)) {
                drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
                            dig_port->tc_port_name);
                return dig_port->tc_mode == TC_PORT_TBT_ALT;
        }

        if (icl_tc_phy_is_in_safe_mode(dig_port)) {
                drm_dbg_kms(&i915->drm, "Port %s: PHY still in safe mode\n",
                            dig_port->tc_port_name);

                return false;
        }

        return dig_port->tc_mode == TC_PORT_DP_ALT ||
               dig_port->tc_mode == TC_PORT_LEGACY;
}

static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        u32 live_status_mask = tc_port_live_status_mask(dig_port);
        bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
        enum tc_port_mode mode;

        if (in_safe_mode ||
            drm_WARN_ON(&i915->drm, !icl_tc_phy_status_complete(dig_port)))
                return TC_PORT_TBT_ALT;

        mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
        if (live_status_mask) {
                enum tc_port_mode live_mode = fls(live_status_mask) - 1;

                if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
                        mode = live_mode;
        }

        return mode;
}

static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
        u32 live_status_mask = tc_port_live_status_mask(dig_port);

        if (live_status_mask)
                return fls(live_status_mask) - 1;

        return icl_tc_phy_status_complete(dig_port) &&
               dig_port->tc_legacy_port ? TC_PORT_LEGACY :
                                          TC_PORT_TBT_ALT;
}

static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
                                     int required_lanes)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        enum tc_port_mode old_tc_mode = dig_port->tc_mode;

        intel_display_power_flush_work(i915);
        if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) {
                enum intel_display_power_domain aux_domain;
                bool aux_powered;

                aux_domain = intel_aux_power_domain(dig_port);
                aux_powered = intel_display_power_is_enabled(i915, aux_domain);
                drm_WARN_ON(&i915->drm, aux_powered);
        }

        icl_tc_phy_disconnect(dig_port);
        icl_tc_phy_connect(dig_port, required_lanes);

        drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
                    dig_port->tc_port_name,
                    tc_port_mode_name(old_tc_mode),
                    tc_port_mode_name(dig_port->tc_mode));
}

static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
                                 int refcount)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

        drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
        dig_port->tc_link_refcount = refcount;
}

void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_encoder *encoder = &dig_port->base;
        intel_wakeref_t tc_cold_wref;
        int active_links = 0;

        mutex_lock(&dig_port->tc_lock);
        tc_cold_wref = tc_cold_block(dig_port);

        dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
        if (dig_port->dp.is_mst)
                active_links = intel_dp_mst_encoder_active_links(dig_port);
        else if (encoder->base.crtc)
                active_links = to_intel_crtc(encoder->base.crtc)->active;

        if (active_links) {
                if (!icl_tc_phy_is_connected(dig_port))
                        drm_dbg_kms(&i915->drm,
                                    "Port %s: PHY disconnected with %d active link(s)\n",
                                    dig_port->tc_port_name, active_links);
                intel_tc_port_link_init_refcount(dig_port, active_links);

                goto out;
        }

        if (dig_port->tc_legacy_port)
                icl_tc_phy_connect(dig_port, 1);

out:
        drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
                    dig_port->tc_port_name,
                    tc_port_mode_name(dig_port->tc_mode));

        tc_cold_unblock(dig_port, tc_cold_wref);
        mutex_unlock(&dig_port->tc_lock);
}

static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
        return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
}

/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and making everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't use.
 */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        bool is_connected;
        intel_wakeref_t tc_cold_wref;

        intel_tc_port_lock(dig_port);
        tc_cold_wref = tc_cold_block(dig_port);

        is_connected = tc_port_live_status_mask(dig_port) &
                       BIT(dig_port->tc_mode);

        tc_cold_unblock(dig_port, tc_cold_wref);
        intel_tc_port_unlock(dig_port);

        return is_connected;
}

static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
                                 int required_lanes)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        intel_wakeref_t wakeref;

        wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);

        mutex_lock(&dig_port->tc_lock);

        if (!dig_port->tc_link_refcount) {
                intel_wakeref_t tc_cold_wref;

                tc_cold_wref = tc_cold_block(dig_port);

                if (intel_tc_port_needs_reset(dig_port))
                        intel_tc_port_reset_mode(dig_port, required_lanes);

                tc_cold_unblock(dig_port, tc_cold_wref);
        }

        drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
        dig_port->tc_lock_wakeref = wakeref;
}

void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
        __intel_tc_port_lock(dig_port, 1);
}

void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);

        mutex_unlock(&dig_port->tc_lock);

        intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
                                      wakeref);
}

bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
        return mutex_is_locked(&dig_port->tc_lock) ||
               dig_port->tc_link_refcount;
}

void intel_tc_port_get_link(struct intel_digital_port *dig_port,
                            int required_lanes)
{
        __intel_tc_port_lock(dig_port, required_lanes);
        dig_port->tc_link_refcount++;
        intel_tc_port_unlock(dig_port);
}

void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
        mutex_lock(&dig_port->tc_lock);
        dig_port->tc_link_refcount--;
        mutex_unlock(&dig_port->tc_lock);
}

void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        enum port port = dig_port->base.port;
        enum tc_port tc_port = intel_port_to_tc(i915, port);

        if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
                return;

        snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
                 "%c/TC#%d", port_name(port), tc_port + 1);

        mutex_init(&dig_port->tc_lock);
        dig_port->tc_legacy_port = is_legacy;
        dig_port->tc_link_refcount = 0;
        tc_port_load_fia_params(i915, dig_port);
}